diff --git a/.nojekyll b/.nojekyll
new file mode 100644
index 00000000..e69de29b
diff --git a/404.html b/404.html
new file mode 100644
index 00000000..73081fc1
--- /dev/null
+++ b/404.html
@@ -0,0 +1,308 @@
+ [404.html: 308 lines of generated page markup, stripped in extraction; recoverable text is the page title "Triply Documentation" and the body text "404" / "Page not found".]
+ + + + + + + diff --git a/CNAME b/CNAME new file mode 100644 index 00000000..044bf1c6 --- /dev/null +++ b/CNAME @@ -0,0 +1 @@ +docs.triply.cc diff --git a/assets/Add-new-story-element-UI.png b/assets/Add-new-story-element-UI.png new file mode 100644 index 00000000..df3adc18 Binary files /dev/null and b/assets/Add-new-story-element-UI.png differ diff --git a/assets/Add-new-story-element-query-UI.png b/assets/Add-new-story-element-query-UI.png new file mode 100644 index 00000000..a9760efd Binary files /dev/null and b/assets/Add-new-story-element-query-UI.png differ diff --git a/assets/ClassDiagram.png b/assets/ClassDiagram.png new file mode 100644 index 00000000..3ac33e24 Binary files /dev/null and b/assets/ClassDiagram.png differ diff --git a/assets/Create-datastory-UI-2.png b/assets/Create-datastory-UI-2.png new file mode 100644 index 00000000..8dd429f0 Binary files /dev/null and b/assets/Create-datastory-UI-2.png differ diff --git a/assets/MonaLisaGraph.png b/assets/MonaLisaGraph.png new file mode 100644 index 00000000..6706d1d8 Binary files /dev/null and b/assets/MonaLisaGraph.png differ diff --git a/assets/My-first-story-UI.png b/assets/My-first-story-UI.png new file mode 100644 index 00000000..88860758 Binary files /dev/null and b/assets/My-first-story-UI.png differ diff --git a/assets/access-level-control.png b/assets/access-level-control.png new file mode 100644 index 00000000..a7ddc1b1 Binary files /dev/null and b/assets/access-level-control.png differ diff --git a/assets/access-level-dependencies.png b/assets/access-level-dependencies.png new file mode 100644 index 00000000..9fbf4c98 Binary files /dev/null and b/assets/access-level-dependencies.png differ diff --git a/assets/add-data-from-a-url-field.png b/assets/add-data-from-a-url-field.png new file mode 100644 index 00000000..6d032e9a Binary files /dev/null and b/assets/add-data-from-a-url-field.png differ diff --git a/assets/add-data-from-an-existing-dataset-choose-graphs.png b/assets/add-data-from-an-existing-dataset-choose-graphs.png new file mode 100644 index 00000000..073cc12c Binary files /dev/null and b/assets/add-data-from-an-existing-dataset-choose-graphs.png differ diff --git a/assets/add-data-from-an-existing-dataset-field.png b/assets/add-data-from-an-existing-dataset-field.png new file mode 100644 index 00000000..29c2c541 Binary files /dev/null and b/assets/add-data-from-an-existing-dataset-field.png differ diff --git a/assets/add-data-from-files-dialog.png b/assets/add-data-from-files-dialog.png new file mode 100644 index 00000000..d3e950be Binary files /dev/null and b/assets/add-data-from-files-dialog.png differ diff --git a/assets/add-data-from-files-job.png b/assets/add-data-from-files-job.png new file mode 100644 index 00000000..5924aab4 Binary files /dev/null and b/assets/add-data-from-files-job.png differ diff --git a/assets/add-data-pane.png b/assets/add-data-pane.png new file mode 100644 index 00000000..0c90322e Binary files /dev/null and b/assets/add-data-pane.png differ diff --git a/assets/add-dataset-dialog.png b/assets/add-dataset-dialog.png new file mode 100644 index 00000000..2c0e3260 Binary files /dev/null and b/assets/add-dataset-dialog.png differ diff --git a/assets/admin-settings.png b/assets/admin-settings.png new file mode 100644 index 00000000..3f79ee0a Binary files /dev/null and b/assets/admin-settings.png differ diff --git a/assets/audio.png b/assets/audio.png new file mode 100644 index 00000000..44bc1220 Binary files /dev/null and b/assets/audio.png differ diff --git 
a/assets/choice-menu-content-status.png b/assets/choice-menu-content-status.png new file mode 100644 index 00000000..94ce1fa7 Binary files /dev/null and b/assets/choice-menu-content-status.png differ diff --git a/assets/choice-menu-pattern-search.png b/assets/choice-menu-pattern-search.png new file mode 100644 index 00000000..208cad62 Binary files /dev/null and b/assets/choice-menu-pattern-search.png differ diff --git a/assets/choose-class.png b/assets/choose-class.png new file mode 100644 index 00000000..0e792f48 Binary files /dev/null and b/assets/choose-class.png differ diff --git a/assets/cog-wheel.png b/assets/cog-wheel.png new file mode 100644 index 00000000..8753bdec Binary files /dev/null and b/assets/cog-wheel.png differ diff --git a/assets/copyQuery.png b/assets/copyQuery.png new file mode 100644 index 00000000..e602fb10 Binary files /dev/null and b/assets/copyQuery.png differ diff --git a/assets/create-service.png b/assets/create-service.png new file mode 100644 index 00000000..51b910b7 Binary files /dev/null and b/assets/create-service.png differ diff --git a/assets/data-stories-embed.png b/assets/data-stories-embed.png new file mode 100644 index 00000000..b5c430dc Binary files /dev/null and b/assets/data-stories-embed.png differ diff --git a/assets/dataset-homepage-no-data.png b/assets/dataset-homepage-no-data.png new file mode 100644 index 00000000..24f6538b Binary files /dev/null and b/assets/dataset-homepage-no-data.png differ diff --git a/assets/dataset-homepage-with-metadata.png b/assets/dataset-homepage-with-metadata.png new file mode 100644 index 00000000..ff772441 Binary files /dev/null and b/assets/dataset-homepage-with-metadata.png differ diff --git a/assets/dataset-homepage.png b/assets/dataset-homepage.png new file mode 100644 index 00000000..38aafffd Binary files /dev/null and b/assets/dataset-homepage.png differ diff --git a/assets/dataset-settings-page.png b/assets/dataset-settings-page.png new file mode 100644 index 00000000..d11f3ea6 Binary files /dev/null and b/assets/dataset-settings-page.png differ diff --git a/assets/delete-query.png b/assets/delete-query.png new file mode 100644 index 00000000..09301c67 Binary files /dev/null and b/assets/delete-query.png differ diff --git a/assets/description.png b/assets/description.png new file mode 100644 index 00000000..fe029275 Binary files /dev/null and b/assets/description.png differ diff --git a/assets/editing-the-data-model.png b/assets/editing-the-data-model.png new file mode 100644 index 00000000..3cb39c63 Binary files /dev/null and b/assets/editing-the-data-model.png differ diff --git a/assets/export-all-graphs.png b/assets/export-all-graphs.png new file mode 100644 index 00000000..c0e19bde Binary files /dev/null and b/assets/export-all-graphs.png differ diff --git a/assets/export-dataset.png b/assets/export-dataset.png new file mode 100644 index 00000000..8262ce43 Binary files /dev/null and b/assets/export-dataset.png differ diff --git a/assets/export-one-graph.png b/assets/export-one-graph.png new file mode 100644 index 00000000..06310ed3 Binary files /dev/null and b/assets/export-one-graph.png differ diff --git a/assets/extract.png b/assets/extract.png new file mode 100644 index 00000000..2860f2de Binary files /dev/null and b/assets/extract.png differ diff --git a/assets/geo.png b/assets/geo.png new file mode 100644 index 00000000..61276243 Binary files /dev/null and b/assets/geo.png differ diff --git a/assets/graphs-page.png b/assets/graphs-page.png new file mode 100644 index 00000000..e758b67d Binary 
files /dev/null and b/assets/graphs-page.png differ diff --git a/assets/groups-and-order.png b/assets/groups-and-order.png new file mode 100644 index 00000000..64c103a7 Binary files /dev/null and b/assets/groups-and-order.png differ diff --git a/assets/groups-choice-list.png b/assets/groups-choice-list.png new file mode 100644 index 00000000..73cc9831 Binary files /dev/null and b/assets/groups-choice-list.png differ diff --git a/assets/home-screen-logged-in.png b/assets/home-screen-logged-in.png new file mode 100644 index 00000000..0140bdf6 Binary files /dev/null and b/assets/home-screen-logged-in.png differ diff --git a/assets/html-plain.png b/assets/html-plain.png new file mode 100644 index 00000000..a2e8907f Binary files /dev/null and b/assets/html-plain.png differ diff --git a/assets/html-rendered.png b/assets/html-rendered.png new file mode 100644 index 00000000..181321df Binary files /dev/null and b/assets/html-rendered.png differ diff --git a/assets/html-schema.png b/assets/html-schema.png new file mode 100644 index 00000000..f095e5cf Binary files /dev/null and b/assets/html-schema.png differ diff --git a/assets/image.png b/assets/image.png new file mode 100644 index 00000000..566303a4 Binary files /dev/null and b/assets/image.png differ diff --git a/assets/internal.png b/assets/internal.png new file mode 100644 index 00000000..9e49929c Binary files /dev/null and b/assets/internal.png differ diff --git a/assets/json-ld-in-api.png b/assets/json-ld-in-api.png new file mode 100644 index 00000000..9f4a5219 Binary files /dev/null and b/assets/json-ld-in-api.png differ diff --git a/assets/json-ld-navigator.png b/assets/json-ld-navigator.png new file mode 100644 index 00000000..258fc5fe Binary files /dev/null and b/assets/json-ld-navigator.png differ diff --git a/assets/json-ld-result.png b/assets/json-ld-result.png new file mode 100644 index 00000000..ff1f0251 Binary files /dev/null and b/assets/json-ld-result.png differ diff --git a/assets/json-ld-script.png b/assets/json-ld-script.png new file mode 100644 index 00000000..1e9e25de Binary files /dev/null and b/assets/json-ld-script.png differ diff --git a/assets/label.png b/assets/label.png new file mode 100644 index 00000000..f38a0394 Binary files /dev/null and b/assets/label.png differ diff --git a/assets/landing-page.png b/assets/landing-page.png new file mode 100644 index 00000000..47c6a5d2 Binary files /dev/null and b/assets/landing-page.png differ diff --git a/assets/ld-browser.png b/assets/ld-browser.png new file mode 100644 index 00000000..bd9e6e22 Binary files /dev/null and b/assets/ld-browser.png differ diff --git a/assets/linked-data-table.png b/assets/linked-data-table.png new file mode 100644 index 00000000..157c8dd1 Binary files /dev/null and b/assets/linked-data-table.png differ diff --git a/assets/login-page.png b/assets/login-page.png new file mode 100644 index 00000000..0d2bbc7b Binary files /dev/null and b/assets/login-page.png differ diff --git a/assets/metadata-settings.png b/assets/metadata-settings.png new file mode 100644 index 00000000..82aabf7d Binary files /dev/null and b/assets/metadata-settings.png differ diff --git a/assets/name-IRI.png b/assets/name-IRI.png new file mode 100644 index 00000000..159e49e4 Binary files /dev/null and b/assets/name-IRI.png differ diff --git a/assets/open-the-editor.png b/assets/open-the-editor.png new file mode 100644 index 00000000..072d3a4d Binary files /dev/null and b/assets/open-the-editor.png differ diff --git a/assets/organization.png b/assets/organization.png new file mode 
100644 index 00000000..48687e46 Binary files /dev/null and b/assets/organization.png differ diff --git a/assets/pokemon-gallery.png b/assets/pokemon-gallery.png new file mode 100644 index 00000000..eb097414 Binary files /dev/null and b/assets/pokemon-gallery.png differ diff --git a/assets/private.png b/assets/private.png new file mode 100644 index 00000000..4dd8a227 Binary files /dev/null and b/assets/private.png differ diff --git a/assets/public.png b/assets/public.png new file mode 100644 index 00000000..a2b32c3c Binary files /dev/null and b/assets/public.png differ diff --git a/assets/queryResult.png b/assets/queryResult.png new file mode 100644 index 00000000..f0dad110 Binary files /dev/null and b/assets/queryResult.png differ diff --git a/assets/queryResult2.png b/assets/queryResult2.png new file mode 100644 index 00000000..cbb38c94 Binary files /dev/null and b/assets/queryResult2.png differ diff --git a/assets/repository.png b/assets/repository.png new file mode 100644 index 00000000..7c0ffd56 Binary files /dev/null and b/assets/repository.png differ diff --git a/assets/save-query-highlighted.png b/assets/save-query-highlighted.png new file mode 100644 index 00000000..694815aa Binary files /dev/null and b/assets/save-query-highlighted.png differ diff --git a/assets/saved-query-metadata.png b/assets/saved-query-metadata.png new file mode 100644 index 00000000..accde3de Binary files /dev/null and b/assets/saved-query-metadata.png differ diff --git a/assets/selectOrganization.png b/assets/selectOrganization.png new file mode 100644 index 00000000..e24a1f59 Binary files /dev/null and b/assets/selectOrganization.png differ diff --git a/assets/service-widget.png b/assets/service-widget.png new file mode 100644 index 00000000..dcf9f218 Binary files /dev/null and b/assets/service-widget.png differ diff --git a/assets/slack-widget.png b/assets/slack-widget.png new file mode 100644 index 00000000..79eadaad Binary files /dev/null and b/assets/slack-widget.png differ diff --git a/assets/sparql-table-example.png b/assets/sparql-table-example.png new file mode 100644 index 00000000..6db502c7 Binary files /dev/null and b/assets/sparql-table-example.png differ diff --git a/assets/transfer-query.png b/assets/transfer-query.png new file mode 100644 index 00000000..28c71648 Binary files /dev/null and b/assets/transfer-query.png differ diff --git a/assets/transfer-to-account.png b/assets/transfer-to-account.png new file mode 100644 index 00000000..26ace6da Binary files /dev/null and b/assets/transfer-to-account.png differ diff --git a/assets/transfer-to-organization.png b/assets/transfer-to-organization.png new file mode 100644 index 00000000..27f0cdd8 Binary files /dev/null and b/assets/transfer-to-organization.png differ diff --git a/assets/type.png b/assets/type.png new file mode 100644 index 00000000..8c5875ab Binary files /dev/null and b/assets/type.png differ diff --git a/assets/update-dataset-profile-pane.png b/assets/update-dataset-profile-pane.png new file mode 100644 index 00000000..d6bd782c Binary files /dev/null and b/assets/update-dataset-profile-pane.png differ diff --git a/assets/upload-error.png b/assets/upload-error.png new file mode 100644 index 00000000..3581a8a6 Binary files /dev/null and b/assets/upload-error.png differ diff --git a/assets/use-of-order.png b/assets/use-of-order.png new file mode 100644 index 00000000..c55e68cc Binary files /dev/null and b/assets/use-of-order.png differ diff --git a/assets/user-screen-logged-in.png b/assets/user-screen-logged-in.png new file mode 
100644 index 00000000..81fc2407 Binary files /dev/null and b/assets/user-screen-logged-in.png differ diff --git a/assets/user.png b/assets/user.png new file mode 100644 index 00000000..e2ed3263 Binary files /dev/null and b/assets/user.png differ diff --git a/assets/video.png b/assets/video.png new file mode 100644 index 00000000..36aa3a4d Binary files /dev/null and b/assets/video.png differ diff --git a/assets/webhook.png b/assets/webhook.png new file mode 100644 index 00000000..31b66805 Binary files /dev/null and b/assets/webhook.png differ diff --git a/assets/webhook_trigger_history.png b/assets/webhook_trigger_history.png new file mode 100644 index 00000000..cea09c65 Binary files /dev/null and b/assets/webhook_trigger_history.png differ diff --git a/assets/where-find-transfer.png b/assets/where-find-transfer.png new file mode 100644 index 00000000..61e9c00a Binary files /dev/null and b/assets/where-find-transfer.png differ diff --git a/assets/yasgui.png b/assets/yasgui.png new file mode 100644 index 00000000..e9bddbd0 Binary files /dev/null and b/assets/yasgui.png differ diff --git a/css/fonts/Roboto-Slab-Bold.woff b/css/fonts/Roboto-Slab-Bold.woff new file mode 100644 index 00000000..6cb60000 Binary files /dev/null and b/css/fonts/Roboto-Slab-Bold.woff differ diff --git a/css/fonts/Roboto-Slab-Bold.woff2 b/css/fonts/Roboto-Slab-Bold.woff2 new file mode 100644 index 00000000..7059e231 Binary files /dev/null and b/css/fonts/Roboto-Slab-Bold.woff2 differ diff --git a/css/fonts/Roboto-Slab-Regular.woff b/css/fonts/Roboto-Slab-Regular.woff new file mode 100644 index 00000000..f815f63f Binary files /dev/null and b/css/fonts/Roboto-Slab-Regular.woff differ diff --git a/css/fonts/Roboto-Slab-Regular.woff2 b/css/fonts/Roboto-Slab-Regular.woff2 new file mode 100644 index 00000000..f2c76e5b Binary files /dev/null and b/css/fonts/Roboto-Slab-Regular.woff2 differ diff --git a/css/fonts/fontawesome-webfont.eot b/css/fonts/fontawesome-webfont.eot new file mode 100644 index 00000000..e9f60ca9 Binary files /dev/null and b/css/fonts/fontawesome-webfont.eot differ diff --git a/css/fonts/fontawesome-webfont.svg b/css/fonts/fontawesome-webfont.svg new file mode 100644 index 00000000..855c845e --- /dev/null +++ b/css/fonts/fontawesome-webfont.svg @@ -0,0 +1,2671 @@ + + + + +Created by FontForge 20120731 at Mon Oct 24 17:37:40 2016 + By ,,, +Copyright Dave Gandy 2016. All rights reserved. 
+ [fontawesome-webfont.svg: 2,671 lines of generated SVG glyph markup, stripped in extraction; only the FontForge header comment above is recoverable.]
diff --git a/css/fonts/fontawesome-webfont.ttf b/css/fonts/fontawesome-webfont.ttf new file mode 100644 index 00000000..35acda2f Binary files /dev/null and b/css/fonts/fontawesome-webfont.ttf differ diff --git a/css/fonts/fontawesome-webfont.woff b/css/fonts/fontawesome-webfont.woff new file mode 100644 index 00000000..400014a4 Binary files /dev/null and b/css/fonts/fontawesome-webfont.woff differ diff --git a/css/fonts/fontawesome-webfont.woff2 b/css/fonts/fontawesome-webfont.woff2 new file mode 100644 index 00000000..4d13fc60 Binary files /dev/null and b/css/fonts/fontawesome-webfont.woff2 differ diff --git a/css/fonts/lato-bold-italic.woff b/css/fonts/lato-bold-italic.woff new file mode 100644 index 00000000..88ad05b9 Binary files /dev/null and b/css/fonts/lato-bold-italic.woff differ diff --git a/css/fonts/lato-bold-italic.woff2 b/css/fonts/lato-bold-italic.woff2 new file mode 100644 index 00000000..c4e3d804 Binary files /dev/null and b/css/fonts/lato-bold-italic.woff2 differ diff --git a/css/fonts/lato-bold.woff b/css/fonts/lato-bold.woff new file mode 100644 index 00000000..c6dff51f Binary files /dev/null and b/css/fonts/lato-bold.woff differ diff --git a/css/fonts/lato-bold.woff2 b/css/fonts/lato-bold.woff2 new file mode 100644 index 00000000..bb195043 Binary files /dev/null and b/css/fonts/lato-bold.woff2 differ diff --git a/css/fonts/lato-normal-italic.woff b/css/fonts/lato-normal-italic.woff new file mode 100644 index 00000000..76114bc0 Binary files /dev/null and b/css/fonts/lato-normal-italic.woff differ diff --git a/css/fonts/lato-normal-italic.woff2 b/css/fonts/lato-normal-italic.woff2 new file mode 100644 index 00000000..3404f37e Binary files /dev/null and b/css/fonts/lato-normal-italic.woff2 differ diff --git a/css/fonts/lato-normal.woff b/css/fonts/lato-normal.woff new file mode 100644 index 00000000..ae1307ff Binary files /dev/null and b/css/fonts/lato-normal.woff differ diff --git a/css/fonts/lato-normal.woff2 b/css/fonts/lato-normal.woff2 new file mode 100644 index 00000000..3bf98433 Binary files /dev/null and b/css/fonts/lato-normal.woff2 differ diff --git
a/css/intellij-light.css b/css/intellij-light.css new file mode 100644 index 00000000..97fc3e2d --- /dev/null +++ b/css/intellij-light.css @@ -0,0 +1 @@ +pre code.hljs{display:block;overflow-x:auto;padding:1em}code.hljs{padding:3px 5px}.hljs{color:#000;background:#fff}.hljs-subst,.hljs-title{font-weight:400;color:#000}.hljs-title.function_{color:#7a7a43}.hljs-code,.hljs-comment,.hljs-quote{color:#8c8c8c;font-style:italic}.hljs-meta{color:#9e880d}.hljs-section{color:#871094}.hljs-built_in,.hljs-keyword,.hljs-literal,.hljs-meta .hljs-keyword,.hljs-name,.hljs-selector-attr,.hljs-selector-class,.hljs-selector-id,.hljs-selector-pseudo,.hljs-selector-tag,.hljs-symbol,.hljs-template-tag,.hljs-type,.hljs-variable.language_{color:#0033b3}.hljs-attr,.hljs-property{color:#871094}.hljs-attribute{color:#174ad4}.hljs-number{color:#1750eb}.hljs-regexp{color:#264eff}.hljs-link{text-decoration:underline;color:#006dcc}.hljs-meta .hljs-string,.hljs-string{color:#067d17}.hljs-char.escape_{color:#0037a6}.hljs-doctag{text-decoration:underline}.hljs-template-variable{color:#248f8f}.hljs-addition{background:#bee6be}.hljs-deletion{background:#d6d6d6}.hljs-emphasis{font-style:italic}.hljs-strong{font-weight:700} \ No newline at end of file diff --git a/css/theme.css b/css/theme.css new file mode 100644 index 00000000..ad773009 --- /dev/null +++ b/css/theme.css @@ -0,0 +1,13 @@ +/* + * This file is copied from the upstream ReadTheDocs Sphinx + * theme. To aid upgradability this file should *not* be edited. + * modifications we need should be included in theme_extra.css. + * + * https://github.com/readthedocs/sphinx_rtd_theme + */ + + /* sphinx_rtd_theme version 1.2.0 | MIT license */ +html{box-sizing:border-box}*,:after,:before{box-sizing:inherit}article,aside,details,figcaption,figure,footer,header,hgroup,nav,section{display:block}audio,canvas,video{display:inline-block;*display:inline;*zoom:1}[hidden],audio:not([controls]){display:none}*{-webkit-box-sizing:border-box;-moz-box-sizing:border-box;box-sizing:border-box}html{font-size:100%;-webkit-text-size-adjust:100%;-ms-text-size-adjust:100%}body{margin:0}a:active,a:hover{outline:0}abbr[title]{border-bottom:1px dotted}b,strong{font-weight:700}blockquote{margin:0}dfn{font-style:italic}ins{background:#ff9;text-decoration:none}ins,mark{color:#000}mark{background:#ff0;font-style:italic;font-weight:700}.rst-content code,.rst-content tt,code,kbd,pre,samp{font-family:monospace,serif;_font-family:courier new,monospace;font-size:1em}pre{white-space:pre}q{quotes:none}q:after,q:before{content:"";content:none}small{font-size:85%}sub,sup{font-size:75%;line-height:0;position:relative;vertical-align:baseline}sup{top:-.5em}sub{bottom:-.25em}dl,ol,ul{margin:0;padding:0;list-style:none;list-style-image:none}li{list-style:none}dd{margin:0}img{border:0;-ms-interpolation-mode:bicubic;vertical-align:middle;max-width:100%}svg:not(:root){overflow:hidden}figure,form{margin:0}label{cursor:pointer}button,input,select,textarea{font-size:100%;margin:0;vertical-align:baseline;*vertical-align:middle}button,input{line-height:normal}button,input[type=button],input[type=reset],input[type=submit]{cursor:pointer;-webkit-appearance:button;*overflow:visible}button[disabled],input[disabled]{cursor:default}input[type=search]{-webkit-appearance:textfield;-moz-box-sizing:content-box;-webkit-box-sizing:content-box;box-sizing:content-box}textarea{resize:vertical}table{border-collapse:collapse;border-spacing:0}td{vertical-align:top}.chromeframe{margin:.2em 0;background:#ccc;color:#000;padding:.2em 
0}.ir{display:block;border:0;text-indent:-999em;overflow:hidden;background-color:transparent;background-repeat:no-repeat;text-align:left;direction:ltr;*line-height:0}.ir br{display:none}.hidden{display:none!important;visibility:hidden}.visuallyhidden{border:0;clip:rect(0 0 0 0);height:1px;margin:-1px;overflow:hidden;padding:0;position:absolute;width:1px}.visuallyhidden.focusable:active,.visuallyhidden.focusable:focus{clip:auto;height:auto;margin:0;overflow:visible;position:static;width:auto}.invisible{visibility:hidden}.relative{position:relative}big,small{font-size:100%}@media print{body,html,section{background:none!important}*{box-shadow:none!important;text-shadow:none!important;filter:none!important;-ms-filter:none!important}a,a:visited{text-decoration:underline}.ir a:after,a[href^="#"]:after,a[href^="javascript:"]:after{content:""}blockquote,pre{page-break-inside:avoid}thead{display:table-header-group}img,tr{page-break-inside:avoid}img{max-width:100%!important}@page{margin:.5cm}.rst-content .toctree-wrapper>p.caption,h2,h3,p{orphans:3;widows:3}.rst-content .toctree-wrapper>p.caption,h2,h3{page-break-after:avoid}}.btn,.fa:before,.icon:before,.rst-content .admonition,.rst-content .admonition-title:before,.rst-content .admonition-todo,.rst-content .attention,.rst-content .caution,.rst-content .code-block-caption .headerlink:before,.rst-content .danger,.rst-content .eqno .headerlink:before,.rst-content .error,.rst-content .hint,.rst-content .important,.rst-content .note,.rst-content .seealso,.rst-content .tip,.rst-content .warning,.rst-content code.download span:first-child:before,.rst-content dl dt .headerlink:before,.rst-content h1 .headerlink:before,.rst-content h2 .headerlink:before,.rst-content h3 .headerlink:before,.rst-content h4 .headerlink:before,.rst-content h5 .headerlink:before,.rst-content h6 .headerlink:before,.rst-content p.caption .headerlink:before,.rst-content p .headerlink:before,.rst-content table>caption .headerlink:before,.rst-content tt.download span:first-child:before,.wy-alert,.wy-dropdown .caret:before,.wy-inline-validate.wy-inline-validate-danger .wy-input-context:before,.wy-inline-validate.wy-inline-validate-info .wy-input-context:before,.wy-inline-validate.wy-inline-validate-success .wy-input-context:before,.wy-inline-validate.wy-inline-validate-warning .wy-input-context:before,.wy-menu-vertical li.current>a button.toctree-expand:before,.wy-menu-vertical li.on a button.toctree-expand:before,.wy-menu-vertical li button.toctree-expand:before,input[type=color],input[type=date],input[type=datetime-local],input[type=datetime],input[type=email],input[type=month],input[type=number],input[type=password],input[type=search],input[type=tel],input[type=text],input[type=time],input[type=url],input[type=week],select,textarea{-webkit-font-smoothing:antialiased}.clearfix{*zoom:1}.clearfix:after,.clearfix:before{display:table;content:""}.clearfix:after{clear:both}/*! 
+ * Font Awesome 4.7.0 by @davegandy - http://fontawesome.io - @fontawesome + * License - http://fontawesome.io/license (Font: SIL OFL 1.1, CSS: MIT License) + */@font-face{font-family:FontAwesome;src:url(fonts/fontawesome-webfont.eot?674f50d287a8c48dc19ba404d20fe713);src:url(fonts/fontawesome-webfont.eot?674f50d287a8c48dc19ba404d20fe713?#iefix&v=4.7.0) format("embedded-opentype"),url(fonts/fontawesome-webfont.woff2?af7ae505a9eed503f8b8e6982036873e) format("woff2"),url(fonts/fontawesome-webfont.woff?fee66e712a8a08eef5805a46892932ad) format("woff"),url(fonts/fontawesome-webfont.ttf?b06871f281fee6b241d60582ae9369b9) format("truetype"),url(fonts/fontawesome-webfont.svg?912ec66d7572ff821749319396470bde#fontawesomeregular) format("svg");font-weight:400;font-style:normal}.fa,.icon,.rst-content .admonition-title,.rst-content .code-block-caption .headerlink,.rst-content .eqno .headerlink,.rst-content code.download span:first-child,.rst-content dl dt .headerlink,.rst-content h1 .headerlink,.rst-content h2 .headerlink,.rst-content h3 .headerlink,.rst-content h4 .headerlink,.rst-content h5 .headerlink,.rst-content h6 .headerlink,.rst-content p.caption .headerlink,.rst-content p .headerlink,.rst-content table>caption .headerlink,.rst-content tt.download span:first-child,.wy-menu-vertical li.current>a button.toctree-expand,.wy-menu-vertical li.on a button.toctree-expand,.wy-menu-vertical li button.toctree-expand{display:inline-block;font:normal normal normal 14px/1 FontAwesome;font-size:inherit;text-rendering:auto;-webkit-font-smoothing:antialiased;-moz-osx-font-smoothing:grayscale}.fa-lg{font-size:1.33333em;line-height:.75em;vertical-align:-15%}.fa-2x{font-size:2em}.fa-3x{font-size:3em}.fa-4x{font-size:4em}.fa-5x{font-size:5em}.fa-fw{width:1.28571em;text-align:center}.fa-ul{padding-left:0;margin-left:2.14286em;list-style-type:none}.fa-ul>li{position:relative}.fa-li{position:absolute;left:-2.14286em;width:2.14286em;top:.14286em;text-align:center}.fa-li.fa-lg{left:-1.85714em}.fa-border{padding:.2em .25em .15em;border:.08em solid #eee;border-radius:.1em}.fa-pull-left{float:left}.fa-pull-right{float:right}.fa-pull-left.icon,.fa.fa-pull-left,.rst-content .code-block-caption .fa-pull-left.headerlink,.rst-content .eqno .fa-pull-left.headerlink,.rst-content .fa-pull-left.admonition-title,.rst-content code.download span.fa-pull-left:first-child,.rst-content dl dt .fa-pull-left.headerlink,.rst-content h1 .fa-pull-left.headerlink,.rst-content h2 .fa-pull-left.headerlink,.rst-content h3 .fa-pull-left.headerlink,.rst-content h4 .fa-pull-left.headerlink,.rst-content h5 .fa-pull-left.headerlink,.rst-content h6 .fa-pull-left.headerlink,.rst-content p .fa-pull-left.headerlink,.rst-content table>caption .fa-pull-left.headerlink,.rst-content tt.download span.fa-pull-left:first-child,.wy-menu-vertical li.current>a button.fa-pull-left.toctree-expand,.wy-menu-vertical li.on a button.fa-pull-left.toctree-expand,.wy-menu-vertical li button.fa-pull-left.toctree-expand{margin-right:.3em}.fa-pull-right.icon,.fa.fa-pull-right,.rst-content .code-block-caption .fa-pull-right.headerlink,.rst-content .eqno .fa-pull-right.headerlink,.rst-content .fa-pull-right.admonition-title,.rst-content code.download span.fa-pull-right:first-child,.rst-content dl dt .fa-pull-right.headerlink,.rst-content h1 .fa-pull-right.headerlink,.rst-content h2 .fa-pull-right.headerlink,.rst-content h3 .fa-pull-right.headerlink,.rst-content h4 .fa-pull-right.headerlink,.rst-content h5 .fa-pull-right.headerlink,.rst-content h6 
.fa-pull-right.headerlink,.rst-content p .fa-pull-right.headerlink,.rst-content table>caption .fa-pull-right.headerlink,.rst-content tt.download span.fa-pull-right:first-child,.wy-menu-vertical li.current>a button.fa-pull-right.toctree-expand,.wy-menu-vertical li.on a button.fa-pull-right.toctree-expand,.wy-menu-vertical li button.fa-pull-right.toctree-expand{margin-left:.3em}.pull-right{float:right}.pull-left{float:left}.fa.pull-left,.pull-left.icon,.rst-content .code-block-caption .pull-left.headerlink,.rst-content .eqno .pull-left.headerlink,.rst-content .pull-left.admonition-title,.rst-content code.download span.pull-left:first-child,.rst-content dl dt .pull-left.headerlink,.rst-content h1 .pull-left.headerlink,.rst-content h2 .pull-left.headerlink,.rst-content h3 .pull-left.headerlink,.rst-content h4 .pull-left.headerlink,.rst-content h5 .pull-left.headerlink,.rst-content h6 .pull-left.headerlink,.rst-content p .pull-left.headerlink,.rst-content table>caption .pull-left.headerlink,.rst-content tt.download span.pull-left:first-child,.wy-menu-vertical li.current>a button.pull-left.toctree-expand,.wy-menu-vertical li.on a button.pull-left.toctree-expand,.wy-menu-vertical li button.pull-left.toctree-expand{margin-right:.3em}.fa.pull-right,.pull-right.icon,.rst-content .code-block-caption .pull-right.headerlink,.rst-content .eqno .pull-right.headerlink,.rst-content .pull-right.admonition-title,.rst-content code.download span.pull-right:first-child,.rst-content dl dt .pull-right.headerlink,.rst-content h1 .pull-right.headerlink,.rst-content h2 .pull-right.headerlink,.rst-content h3 .pull-right.headerlink,.rst-content h4 .pull-right.headerlink,.rst-content h5 .pull-right.headerlink,.rst-content h6 .pull-right.headerlink,.rst-content p .pull-right.headerlink,.rst-content table>caption .pull-right.headerlink,.rst-content tt.download span.pull-right:first-child,.wy-menu-vertical li.current>a button.pull-right.toctree-expand,.wy-menu-vertical li.on a button.pull-right.toctree-expand,.wy-menu-vertical li button.pull-right.toctree-expand{margin-left:.3em}.fa-spin{-webkit-animation:fa-spin 2s linear infinite;animation:fa-spin 2s linear infinite}.fa-pulse{-webkit-animation:fa-spin 1s steps(8) infinite;animation:fa-spin 1s steps(8) infinite}@-webkit-keyframes fa-spin{0%{-webkit-transform:rotate(0deg);transform:rotate(0deg)}to{-webkit-transform:rotate(359deg);transform:rotate(359deg)}}@keyframes fa-spin{0%{-webkit-transform:rotate(0deg);transform:rotate(0deg)}to{-webkit-transform:rotate(359deg);transform:rotate(359deg)}}.fa-rotate-90{-ms-filter:"progid:DXImageTransform.Microsoft.BasicImage(rotation=1)";-webkit-transform:rotate(90deg);-ms-transform:rotate(90deg);transform:rotate(90deg)}.fa-rotate-180{-ms-filter:"progid:DXImageTransform.Microsoft.BasicImage(rotation=2)";-webkit-transform:rotate(180deg);-ms-transform:rotate(180deg);transform:rotate(180deg)}.fa-rotate-270{-ms-filter:"progid:DXImageTransform.Microsoft.BasicImage(rotation=3)";-webkit-transform:rotate(270deg);-ms-transform:rotate(270deg);transform:rotate(270deg)}.fa-flip-horizontal{-ms-filter:"progid:DXImageTransform.Microsoft.BasicImage(rotation=0, mirror=1)";-webkit-transform:scaleX(-1);-ms-transform:scaleX(-1);transform:scaleX(-1)}.fa-flip-vertical{-ms-filter:"progid:DXImageTransform.Microsoft.BasicImage(rotation=2, mirror=1)";-webkit-transform:scaleY(-1);-ms-transform:scaleY(-1);transform:scaleY(-1)}:root .fa-flip-horizontal,:root .fa-flip-vertical,:root .fa-rotate-90,:root .fa-rotate-180,:root 
.fa-rotate-270{filter:none}.fa-stack{position:relative;display:inline-block;width:2em;height:2em;line-height:2em;vertical-align:middle}.fa-stack-1x,.fa-stack-2x{position:absolute;left:0;width:100%;text-align:center}.fa-stack-1x{line-height:inherit}.fa-stack-2x{font-size:2em}.fa-inverse{color:#fff}.fa-glass:before{content:""}.fa-music:before{content:""}.fa-search:before,.icon-search:before{content:""}.fa-envelope-o:before{content:""}.fa-heart:before{content:""}.fa-star:before{content:""}.fa-star-o:before{content:""}.fa-user:before{content:""}.fa-film:before{content:""}.fa-th-large:before{content:""}.fa-th:before{content:""}.fa-th-list:before{content:""}.fa-check:before{content:""}.fa-close:before,.fa-remove:before,.fa-times:before{content:""}.fa-search-plus:before{content:""}.fa-search-minus:before{content:""}.fa-power-off:before{content:""}.fa-signal:before{content:""}.fa-cog:before,.fa-gear:before{content:""}.fa-trash-o:before{content:""}.fa-home:before,.icon-home:before{content:""}.fa-file-o:before{content:""}.fa-clock-o:before{content:""}.fa-road:before{content:""}.fa-download:before,.rst-content code.download span:first-child:before,.rst-content tt.download span:first-child:before{content:""}.fa-arrow-circle-o-down:before{content:""}.fa-arrow-circle-o-up:before{content:""}.fa-inbox:before{content:""}.fa-play-circle-o:before{content:""}.fa-repeat:before,.fa-rotate-right:before{content:""}.fa-refresh:before{content:""}.fa-list-alt:before{content:""}.fa-lock:before{content:""}.fa-flag:before{content:""}.fa-headphones:before{content:""}.fa-volume-off:before{content:""}.fa-volume-down:before{content:""}.fa-volume-up:before{content:""}.fa-qrcode:before{content:""}.fa-barcode:before{content:""}.fa-tag:before{content:""}.fa-tags:before{content:""}.fa-book:before,.icon-book:before{content:""}.fa-bookmark:before{content:""}.fa-print:before{content:""}.fa-camera:before{content:""}.fa-font:before{content:""}.fa-bold:before{content:""}.fa-italic:before{content:""}.fa-text-height:before{content:""}.fa-text-width:before{content:""}.fa-align-left:before{content:""}.fa-align-center:before{content:""}.fa-align-right:before{content:""}.fa-align-justify:before{content:""}.fa-list:before{content:""}.fa-dedent:before,.fa-outdent:before{content:""}.fa-indent:before{content:""}.fa-video-camera:before{content:""}.fa-image:before,.fa-photo:before,.fa-picture-o:before{content:""}.fa-pencil:before{content:""}.fa-map-marker:before{content:""}.fa-adjust:before{content:""}.fa-tint:before{content:""}.fa-edit:before,.fa-pencil-square-o:before{content:""}.fa-share-square-o:before{content:""}.fa-check-square-o:before{content:""}.fa-arrows:before{content:""}.fa-step-backward:before{content:""}.fa-fast-backward:before{content:""}.fa-backward:before{content:""}.fa-play:before{content:""}.fa-pause:before{content:""}.fa-stop:before{content:""}.fa-forward:before{content:""}.fa-fast-forward:before{content:""}.fa-step-forward:before{content:""}.fa-eject:before{content:""}.fa-chevron-left:before{content:""}.fa-chevron-right:before{content:""}.fa-plus-circle:before{content:""}.fa-minus-circle:before{content:""}.fa-times-circle:before,.wy-inline-validate.wy-inline-validate-danger .wy-input-context:before{content:""}.fa-check-circle:before,.wy-inline-validate.wy-inline-validate-success 
.wy-input-context:before{content:""}.fa-question-circle:before{content:""}.fa-info-circle:before{content:""}.fa-crosshairs:before{content:""}.fa-times-circle-o:before{content:""}.fa-check-circle-o:before{content:""}.fa-ban:before{content:""}.fa-arrow-left:before{content:""}.fa-arrow-right:before{content:""}.fa-arrow-up:before{content:""}.fa-arrow-down:before{content:""}.fa-mail-forward:before,.fa-share:before{content:""}.fa-expand:before{content:""}.fa-compress:before{content:""}.fa-plus:before{content:""}.fa-minus:before{content:""}.fa-asterisk:before{content:""}.fa-exclamation-circle:before,.rst-content .admonition-title:before,.wy-inline-validate.wy-inline-validate-info .wy-input-context:before,.wy-inline-validate.wy-inline-validate-warning .wy-input-context:before{content:""}.fa-gift:before{content:""}.fa-leaf:before{content:""}.fa-fire:before,.icon-fire:before{content:""}.fa-eye:before{content:""}.fa-eye-slash:before{content:""}.fa-exclamation-triangle:before,.fa-warning:before{content:""}.fa-plane:before{content:""}.fa-calendar:before{content:""}.fa-random:before{content:""}.fa-comment:before{content:""}.fa-magnet:before{content:""}.fa-chevron-up:before{content:""}.fa-chevron-down:before{content:""}.fa-retweet:before{content:""}.fa-shopping-cart:before{content:""}.fa-folder:before{content:""}.fa-folder-open:before{content:""}.fa-arrows-v:before{content:""}.fa-arrows-h:before{content:""}.fa-bar-chart-o:before,.fa-bar-chart:before{content:""}.fa-twitter-square:before{content:""}.fa-facebook-square:before{content:""}.fa-camera-retro:before{content:""}.fa-key:before{content:""}.fa-cogs:before,.fa-gears:before{content:""}.fa-comments:before{content:""}.fa-thumbs-o-up:before{content:""}.fa-thumbs-o-down:before{content:""}.fa-star-half:before{content:""}.fa-heart-o:before{content:""}.fa-sign-out:before{content:""}.fa-linkedin-square:before{content:""}.fa-thumb-tack:before{content:""}.fa-external-link:before{content:""}.fa-sign-in:before{content:""}.fa-trophy:before{content:""}.fa-github-square:before{content:""}.fa-upload:before{content:""}.fa-lemon-o:before{content:""}.fa-phone:before{content:""}.fa-square-o:before{content:""}.fa-bookmark-o:before{content:""}.fa-phone-square:before{content:""}.fa-twitter:before{content:""}.fa-facebook-f:before,.fa-facebook:before{content:""}.fa-github:before,.icon-github:before{content:""}.fa-unlock:before{content:""}.fa-credit-card:before{content:""}.fa-feed:before,.fa-rss:before{content:""}.fa-hdd-o:before{content:""}.fa-bullhorn:before{content:""}.fa-bell:before{content:""}.fa-certificate:before{content:""}.fa-hand-o-right:before{content:""}.fa-hand-o-left:before{content:""}.fa-hand-o-up:before{content:""}.fa-hand-o-down:before{content:""}.fa-arrow-circle-left:before,.icon-circle-arrow-left:before{content:""}.fa-arrow-circle-right:before,.icon-circle-arrow-right:before{content:""}.fa-arrow-circle-up:before{content:""}.fa-arrow-circle-down:before{content:""}.fa-globe:before{content:""}.fa-wrench:before{content:""}.fa-tasks:before{content:""}.fa-filter:before{content:""}.fa-briefcase:before{content:""}.fa-arrows-alt:before{content:""}.fa-group:before,.fa-users:before{content:""}.fa-chain:before,.fa-link:before,.icon-link:before{content:""}.fa-cloud:before{content:""}.fa-flask:before{content:""}.fa-cut:before,.fa-scissors:before{content:""}.fa-copy:before,.fa-files-o:before{content:""}.fa-paperclip:before{content:""}.fa-floppy-o:before,.fa-save:before{content:""}.fa
-square:before{content:""}.fa-bars:before,.fa-navicon:before,.fa-reorder:before{content:""}.fa-list-ul:before{content:""}.fa-list-ol:before{content:""}.fa-strikethrough:before{content:""}.fa-underline:before{content:""}.fa-table:before{content:""}.fa-magic:before{content:""}.fa-truck:before{content:""}.fa-pinterest:before{content:""}.fa-pinterest-square:before{content:""}.fa-google-plus-square:before{content:""}.fa-google-plus:before{content:""}.fa-money:before{content:""}.fa-caret-down:before,.icon-caret-down:before,.wy-dropdown .caret:before{content:""}.fa-caret-up:before{content:""}.fa-caret-left:before{content:""}.fa-caret-right:before{content:""}.fa-columns:before{content:""}.fa-sort:before,.fa-unsorted:before{content:""}.fa-sort-desc:before,.fa-sort-down:before{content:""}.fa-sort-asc:before,.fa-sort-up:before{content:""}.fa-envelope:before{content:""}.fa-linkedin:before{content:""}.fa-rotate-left:before,.fa-undo:before{content:""}.fa-gavel:before,.fa-legal:before{content:""}.fa-dashboard:before,.fa-tachometer:before{content:""}.fa-comment-o:before{content:""}.fa-comments-o:before{content:""}.fa-bolt:before,.fa-flash:before{content:""}.fa-sitemap:before{content:""}.fa-umbrella:before{content:""}.fa-clipboard:before,.fa-paste:before{content:""}.fa-lightbulb-o:before{content:""}.fa-exchange:before{content:""}.fa-cloud-download:before{content:""}.fa-cloud-upload:before{content:""}.fa-user-md:before{content:""}.fa-stethoscope:before{content:""}.fa-suitcase:before{content:""}.fa-bell-o:before{content:""}.fa-coffee:before{content:""}.fa-cutlery:before{content:""}.fa-file-text-o:before{content:""}.fa-building-o:before{content:""}.fa-hospital-o:before{content:""}.fa-ambulance:before{content:""}.fa-medkit:before{content:""}.fa-fighter-jet:before{content:""}.fa-beer:before{content:""}.fa-h-square:before{content:""}.fa-plus-square:before{content:""}.fa-angle-double-left:before{content:""}.fa-angle-double-right:before{content:""}.fa-angle-double-up:before{content:""}.fa-angle-double-down:before{content:""}.fa-angle-left:before{content:""}.fa-angle-right:before{content:""}.fa-angle-up:before{content:""}.fa-angle-down:before{content:""}.fa-desktop:before{content:""}.fa-laptop:before{content:""}.fa-tablet:before{content:""}.fa-mobile-phone:before,.fa-mobile:before{content:""}.fa-circle-o:before{content:""}.fa-quote-left:before{content:""}.fa-quote-right:before{content:""}.fa-spinner:before{content:""}.fa-circle:before{content:""}.fa-mail-reply:before,.fa-reply:before{content:""}.fa-github-alt:before{content:""}.fa-folder-o:before{content:""}.fa-folder-open-o:before{content:""}.fa-smile-o:before{content:""}.fa-frown-o:before{content:""}.fa-meh-o:before{content:""}.fa-gamepad:before{content:""}.fa-keyboard-o:before{content:""}.fa-flag-o:before{content:""}.fa-flag-checkered:before{content:""}.fa-terminal:before{content:""}.fa-code:before{content:""}.fa-mail-reply-all:before,.fa-reply-all:before{content:""}.fa-star-half-empty:before,.fa-star-half-full:before,.fa-star-half-o:before{content:""}.fa-location-arrow:before{content:""}.fa-crop:before{content:""}.fa-code-fork:before{content:""}.fa-chain-broken:before,.fa-unlink:before{content:""}.fa-question:before{content:""}.fa-info:before{content:""}.fa-exclamation:before{content:""}.fa-superscript:before{content:""}.fa-subscript:before{content:""}.fa-eraser:before{content:""}.fa-puzzle-piece:before{content:""}.fa-microphone:before{content:""}.fa-microphone-slash:
before{content:""}.fa-shield:before{content:""}.fa-calendar-o:before{content:""}.fa-fire-extinguisher:before{content:""}.fa-rocket:before{content:""}.fa-maxcdn:before{content:""}.fa-chevron-circle-left:before{content:""}.fa-chevron-circle-right:before{content:""}.fa-chevron-circle-up:before{content:""}.fa-chevron-circle-down:before{content:""}.fa-html5:before{content:""}.fa-css3:before{content:""}.fa-anchor:before{content:""}.fa-unlock-alt:before{content:""}.fa-bullseye:before{content:""}.fa-ellipsis-h:before{content:""}.fa-ellipsis-v:before{content:""}.fa-rss-square:before{content:""}.fa-play-circle:before{content:""}.fa-ticket:before{content:""}.fa-minus-square:before{content:""}.fa-minus-square-o:before,.wy-menu-vertical li.current>a button.toctree-expand:before,.wy-menu-vertical li.on a button.toctree-expand:before{content:""}.fa-level-up:before{content:""}.fa-level-down:before{content:""}.fa-check-square:before{content:""}.fa-pencil-square:before{content:""}.fa-external-link-square:before{content:""}.fa-share-square:before{content:""}.fa-compass:before{content:""}.fa-caret-square-o-down:before,.fa-toggle-down:before{content:""}.fa-caret-square-o-up:before,.fa-toggle-up:before{content:""}.fa-caret-square-o-right:before,.fa-toggle-right:before{content:""}.fa-eur:before,.fa-euro:before{content:""}.fa-gbp:before{content:""}.fa-dollar:before,.fa-usd:before{content:""}.fa-inr:before,.fa-rupee:before{content:""}.fa-cny:before,.fa-jpy:before,.fa-rmb:before,.fa-yen:before{content:""}.fa-rouble:before,.fa-rub:before,.fa-ruble:before{content:""}.fa-krw:before,.fa-won:before{content:""}.fa-bitcoin:before,.fa-btc:before{content:""}.fa-file:before{content:""}.fa-file-text:before{content:""}.fa-sort-alpha-asc:before{content:""}.fa-sort-alpha-desc:before{content:""}.fa-sort-amount-asc:before{content:""}.fa-sort-amount-desc:before{content:""}.fa-sort-numeric-asc:before{content:""}.fa-sort-numeric-desc:before{content:""}.fa-thumbs-up:before{content:""}.fa-thumbs-down:before{content:""}.fa-youtube-square:before{content:""}.fa-youtube:before{content:""}.fa-xing:before{content:""}.fa-xing-square:before{content:""}.fa-youtube-play:before{content:""}.fa-dropbox:before{content:""}.fa-stack-overflow:before{content:""}.fa-instagram:before{content:""}.fa-flickr:before{content:""}.fa-adn:before{content:""}.fa-bitbucket:before,.icon-bitbucket:before{content:""}.fa-bitbucket-square:before{content:""}.fa-tumblr:before{content:""}.fa-tumblr-square:before{content:""}.fa-long-arrow-down:before{content:""}.fa-long-arrow-up:before{content:""}.fa-long-arrow-left:before{content:""}.fa-long-arrow-right:before{content:""}.fa-apple:before{content:""}.fa-windows:before{content:""}.fa-android:before{content:""}.fa-linux:before{content:""}.fa-dribbble:before{content:""}.fa-skype:before{content:""}.fa-foursquare:before{content:""}.fa-trello:before{content:""}.fa-female:before{content:""}.fa-male:before{content:""}.fa-gittip:before,.fa-gratipay:before{content:""}.fa-sun-o:before{content:""}.fa-moon-o:before{content:""}.fa-archive:before{content:""}.fa-bug:before{content:""}.fa-vk:before{content:""}.fa-weibo:before{content:""}.fa-renren:before{content:""}.fa-pagelines:before{content:""}.fa-stack-exchange:before{content:""}.fa-arrow-circle-o-right:before{content:""}.fa-arrow-circle-o-left:before{content:""}.fa-caret-square-o-left:before,.fa-toggle-left:before{content:""}.fa-dot-circle-o:before{content:""}.fa-wheelchair:before{content:""}.fa-
vimeo-square:before{content:""}.fa-try:before,.fa-turkish-lira:before{content:""}.fa-plus-square-o:before,.wy-menu-vertical li button.toctree-expand:before{content:""}.fa-space-shuttle:before{content:""}.fa-slack:before{content:""}.fa-envelope-square:before{content:""}.fa-wordpress:before{content:""}.fa-openid:before{content:""}.fa-bank:before,.fa-institution:before,.fa-university:before{content:""}.fa-graduation-cap:before,.fa-mortar-board:before{content:""}.fa-yahoo:before{content:""}.fa-google:before{content:""}.fa-reddit:before{content:""}.fa-reddit-square:before{content:""}.fa-stumbleupon-circle:before{content:""}.fa-stumbleupon:before{content:""}.fa-delicious:before{content:""}.fa-digg:before{content:""}.fa-pied-piper-pp:before{content:""}.fa-pied-piper-alt:before{content:""}.fa-drupal:before{content:""}.fa-joomla:before{content:""}.fa-language:before{content:""}.fa-fax:before{content:""}.fa-building:before{content:""}.fa-child:before{content:""}.fa-paw:before{content:""}.fa-spoon:before{content:""}.fa-cube:before{content:""}.fa-cubes:before{content:""}.fa-behance:before{content:""}.fa-behance-square:before{content:""}.fa-steam:before{content:""}.fa-steam-square:before{content:""}.fa-recycle:before{content:""}.fa-automobile:before,.fa-car:before{content:""}.fa-cab:before,.fa-taxi:before{content:""}.fa-tree:before{content:""}.fa-spotify:before{content:""}.fa-deviantart:before{content:""}.fa-soundcloud:before{content:""}.fa-database:before{content:""}.fa-file-pdf-o:before{content:""}.fa-file-word-o:before{content:""}.fa-file-excel-o:before{content:""}.fa-file-powerpoint-o:before{content:""}.fa-file-image-o:before,.fa-file-photo-o:before,.fa-file-picture-o:before{content:""}.fa-file-archive-o:before,.fa-file-zip-o:before{content:""}.fa-file-audio-o:before,.fa-file-sound-o:before{content:""}.fa-file-movie-o:before,.fa-file-video-o:before{content:""}.fa-file-code-o:before{content:""}.fa-vine:before{content:""}.fa-codepen:before{content:""}.fa-jsfiddle:before{content:""}.fa-life-bouy:before,.fa-life-buoy:before,.fa-life-ring:before,.fa-life-saver:before,.fa-support:before{content:""}.fa-circle-o-notch:before{content:""}.fa-ra:before,.fa-rebel:before,.fa-resistance:before{content:""}.fa-empire:before,.fa-ge:before{content:""}.fa-git-square:before{content:""}.fa-git:before{content:""}.fa-hacker-news:before,.fa-y-combinator-square:before,.fa-yc-square:before{content:""}.fa-tencent-weibo:before{content:""}.fa-qq:before{content:""}.fa-wechat:before,.fa-weixin:before{content:""}.fa-paper-plane:before,.fa-send:before{content:""}.fa-paper-plane-o:before,.fa-send-o:before{content:""}.fa-history:before{content:""}.fa-circle-thin:before{content:""}.fa-header:before{content:""}.fa-paragraph:before{content:""}.fa-sliders:before{content:""}.fa-share-alt:before{content:""}.fa-share-alt-square:before{content:""}.fa-bomb:before{content:""}.fa-futbol-o:before,.fa-soccer-ball-o:before{content:""}.fa-tty:before{content:""}.fa-binoculars:before{content:""}.fa-plug:before{content:""}.fa-slideshare:before{content:""}.fa-twitch:before{content:""}.fa-yelp:before{content:""}.fa-newspaper-o:before{content:""}.fa-wifi:before{content:""}.fa-calculator:before{content:""}.fa-paypal:before{content:""}.fa-google-wallet:before{content:""}.fa-cc-visa:before{content:""}.fa-cc-mastercard:before{content:""}.fa-cc-discover:before{content:""}.fa-cc-amex:before{content:""}.fa-cc-paypal:before{content:""}.fa-cc-stripe:before{content:""}.fa-b
ell-slash:before{content:""}.fa-bell-slash-o:before{content:""}.fa-trash:before{content:""}.fa-copyright:before{content:""}.fa-at:before{content:""}.fa-eyedropper:before{content:""}.fa-paint-brush:before{content:""}.fa-birthday-cake:before{content:""}.fa-area-chart:before{content:""}.fa-pie-chart:before{content:""}.fa-line-chart:before{content:""}.fa-lastfm:before{content:""}.fa-lastfm-square:before{content:""}.fa-toggle-off:before{content:""}.fa-toggle-on:before{content:""}.fa-bicycle:before{content:""}.fa-bus:before{content:""}.fa-ioxhost:before{content:""}.fa-angellist:before{content:""}.fa-cc:before{content:""}.fa-ils:before,.fa-shekel:before,.fa-sheqel:before{content:""}.fa-meanpath:before{content:""}.fa-buysellads:before{content:""}.fa-connectdevelop:before{content:""}.fa-dashcube:before{content:""}.fa-forumbee:before{content:""}.fa-leanpub:before{content:""}.fa-sellsy:before{content:""}.fa-shirtsinbulk:before{content:""}.fa-simplybuilt:before{content:""}.fa-skyatlas:before{content:""}.fa-cart-plus:before{content:""}.fa-cart-arrow-down:before{content:""}.fa-diamond:before{content:""}.fa-ship:before{content:""}.fa-user-secret:before{content:""}.fa-motorcycle:before{content:""}.fa-street-view:before{content:""}.fa-heartbeat:before{content:""}.fa-venus:before{content:""}.fa-mars:before{content:""}.fa-mercury:before{content:""}.fa-intersex:before,.fa-transgender:before{content:""}.fa-transgender-alt:before{content:""}.fa-venus-double:before{content:""}.fa-mars-double:before{content:""}.fa-venus-mars:before{content:""}.fa-mars-stroke:before{content:""}.fa-mars-stroke-v:before{content:""}.fa-mars-stroke-h:before{content:""}.fa-neuter:before{content:""}.fa-genderless:before{content:""}.fa-facebook-official:before{content:""}.fa-pinterest-p:before{content:""}.fa-whatsapp:before{content:""}.fa-server:before{content:""}.fa-user-plus:before{content:""}.fa-user-times:before{content:""}.fa-bed:before,.fa-hotel:before{content:""}.fa-viacoin:before{content:""}.fa-train:before{content:""}.fa-subway:before{content:""}.fa-medium:before{content:""}.fa-y-combinator:before,.fa-yc:before{content:""}.fa-optin-monster:before{content:""}.fa-opencart:before{content:""}.fa-expeditedssl:before{content:""}.fa-battery-4:before,.fa-battery-full:before,.fa-battery:before{content:""}.fa-battery-3:before,.fa-battery-three-quarters:before{content:""}.fa-battery-2:before,.fa-battery-half:before{content:""}.fa-battery-1:before,.fa-battery-quarter:before{content:""}.fa-battery-0:before,.fa-battery-empty:before{content:""}.fa-mouse-pointer:before{content:""}.fa-i-cursor:before{content:""}.fa-object-group:before{content:""}.fa-object-ungroup:before{content:""}.fa-sticky-note:before{content:""}.fa-sticky-note-o:before{content:""}.fa-cc-jcb:before{content:""}.fa-cc-diners-club:before{content:""}.fa-clone:before{content:""}.fa-balance-scale:before{content:""}.fa-hourglass-o:before{content:""}.fa-hourglass-1:before,.fa-hourglass-start:before{content:""}.fa-hourglass-2:before,.fa-hourglass-half:before{content:""}.fa-hourglass-3:before,.fa-hourglass-end:before{content:""}.fa-hourglass:before{content:""}.fa-hand-grab-o:before,.fa-hand-rock-o:before{content:""}.fa-hand-paper-o:before,.fa-hand-stop-o:before{content:""}.fa-hand-scissors-o:before{content:""}.fa-hand-lizard-o:before{content:""}.fa-hand-spock-o:before{content:""}.fa-hand-pointer-o:before{content:""}.fa-hand-peace-o:before{content:""}.fa-trademark:before{content:""}.fa-register
ed:before{content:""}.fa-creative-commons:before{content:""}.fa-gg:before{content:""}.fa-gg-circle:before{content:""}.fa-tripadvisor:before{content:""}.fa-odnoklassniki:before{content:""}.fa-odnoklassniki-square:before{content:""}.fa-get-pocket:before{content:""}.fa-wikipedia-w:before{content:""}.fa-safari:before{content:""}.fa-chrome:before{content:""}.fa-firefox:before{content:""}.fa-opera:before{content:""}.fa-internet-explorer:before{content:""}.fa-television:before,.fa-tv:before{content:""}.fa-contao:before{content:""}.fa-500px:before{content:""}.fa-amazon:before{content:""}.fa-calendar-plus-o:before{content:""}.fa-calendar-minus-o:before{content:""}.fa-calendar-times-o:before{content:""}.fa-calendar-check-o:before{content:""}.fa-industry:before{content:""}.fa-map-pin:before{content:""}.fa-map-signs:before{content:""}.fa-map-o:before{content:""}.fa-map:before{content:""}.fa-commenting:before{content:""}.fa-commenting-o:before{content:""}.fa-houzz:before{content:""}.fa-vimeo:before{content:""}.fa-black-tie:before{content:""}.fa-fonticons:before{content:""}.fa-reddit-alien:before{content:""}.fa-edge:before{content:""}.fa-credit-card-alt:before{content:""}.fa-codiepie:before{content:""}.fa-modx:before{content:""}.fa-fort-awesome:before{content:""}.fa-usb:before{content:""}.fa-product-hunt:before{content:""}.fa-mixcloud:before{content:""}.fa-scribd:before{content:""}.fa-pause-circle:before{content:""}.fa-pause-circle-o:before{content:""}.fa-stop-circle:before{content:""}.fa-stop-circle-o:before{content:""}.fa-shopping-bag:before{content:""}.fa-shopping-basket:before{content:""}.fa-hashtag:before{content:""}.fa-bluetooth:before{content:""}.fa-bluetooth-b:before{content:""}.fa-percent:before{content:""}.fa-gitlab:before,.icon-gitlab:before{content:""}.fa-wpbeginner:before{content:""}.fa-wpforms:before{content:""}.fa-envira:before{content:""}.fa-universal-access:before{content:""}.fa-wheelchair-alt:before{content:""}.fa-question-circle-o:before{content:""}.fa-blind:before{content:""}.fa-audio-description:before{content:""}.fa-volume-control-phone:before{content:""}.fa-braille:before{content:""}.fa-assistive-listening-systems:before{content:""}.fa-american-sign-language-interpreting:before,.fa-asl-interpreting:before{content:""}.fa-deaf:before,.fa-deafness:before,.fa-hard-of-hearing:before{content:""}.fa-glide:before{content:""}.fa-glide-g:before{content:""}.fa-sign-language:before,.fa-signing:before{content:""}.fa-low-vision:before{content:""}.fa-viadeo:before{content:""}.fa-viadeo-square:before{content:""}.fa-snapchat:before{content:""}.fa-snapchat-ghost:before{content:""}.fa-snapchat-square:before{content:""}.fa-pied-piper:before{content:""}.fa-first-order:before{content:""}.fa-yoast:before{content:""}.fa-themeisle:before{content:""}.fa-google-plus-circle:before,.fa-google-plus-official:before{content:""}.fa-fa:before,.fa-font-awesome:before{content:""}.fa-handshake-o:before{content:""}.fa-envelope-open:before{content:""}.fa-envelope-open-o:before{content:""}.fa-linode:before{content:""}.fa-address-book:before{content:""}.fa-address-book-o:before{content:""}.fa-address-card:before,.fa-vcard:before{content:""}.fa-address-card-o:before,.fa-vcard-o:before{content:""}.fa-user-circle:before{content:""}.fa-user-circle-o:before{content:""}.fa-user-o:before{content:""}.fa-id-badge:before{content:""}.fa-drivers-license:before,.fa-id-card:before{content:""}.fa-drivers-license-o:before,.fa-id-card-o:before{c
ontent:""}.fa-quora:before{content:""}.fa-free-code-camp:before{content:""}.fa-telegram:before{content:""}.fa-thermometer-4:before,.fa-thermometer-full:before,.fa-thermometer:before{content:""}.fa-thermometer-3:before,.fa-thermometer-three-quarters:before{content:""}.fa-thermometer-2:before,.fa-thermometer-half:before{content:""}.fa-thermometer-1:before,.fa-thermometer-quarter:before{content:""}.fa-thermometer-0:before,.fa-thermometer-empty:before{content:""}.fa-shower:before{content:""}.fa-bath:before,.fa-bathtub:before,.fa-s15:before{content:""}.fa-podcast:before{content:""}.fa-window-maximize:before{content:""}.fa-window-minimize:before{content:""}.fa-window-restore:before{content:""}.fa-times-rectangle:before,.fa-window-close:before{content:""}.fa-times-rectangle-o:before,.fa-window-close-o:before{content:""}.fa-bandcamp:before{content:""}.fa-grav:before{content:""}.fa-etsy:before{content:""}.fa-imdb:before{content:""}.fa-ravelry:before{content:""}.fa-eercast:before{content:""}.fa-microchip:before{content:""}.fa-snowflake-o:before{content:""}.fa-superpowers:before{content:""}.fa-wpexplorer:before{content:""}.fa-meetup:before{content:""}.sr-only{position:absolute;width:1px;height:1px;padding:0;margin:-1px;overflow:hidden;clip:rect(0,0,0,0);border:0}.sr-only-focusable:active,.sr-only-focusable:focus{position:static;width:auto;height:auto;margin:0;overflow:visible;clip:auto}.fa,.icon,.rst-content .admonition-title,.rst-content .code-block-caption .headerlink,.rst-content .eqno .headerlink,.rst-content code.download span:first-child,.rst-content dl dt .headerlink,.rst-content h1 .headerlink,.rst-content h2 .headerlink,.rst-content h3 .headerlink,.rst-content h4 .headerlink,.rst-content h5 .headerlink,.rst-content h6 .headerlink,.rst-content p.caption .headerlink,.rst-content p .headerlink,.rst-content table>caption .headerlink,.rst-content tt.download span:first-child,.wy-dropdown .caret,.wy-inline-validate.wy-inline-validate-danger .wy-input-context,.wy-inline-validate.wy-inline-validate-info .wy-input-context,.wy-inline-validate.wy-inline-validate-success .wy-input-context,.wy-inline-validate.wy-inline-validate-warning .wy-input-context,.wy-menu-vertical li.current>a button.toctree-expand,.wy-menu-vertical li.on a button.toctree-expand,.wy-menu-vertical li button.toctree-expand{font-family:inherit}.fa:before,.icon:before,.rst-content .admonition-title:before,.rst-content .code-block-caption .headerlink:before,.rst-content .eqno .headerlink:before,.rst-content code.download span:first-child:before,.rst-content dl dt .headerlink:before,.rst-content h1 .headerlink:before,.rst-content h2 .headerlink:before,.rst-content h3 .headerlink:before,.rst-content h4 .headerlink:before,.rst-content h5 .headerlink:before,.rst-content h6 .headerlink:before,.rst-content p.caption .headerlink:before,.rst-content p .headerlink:before,.rst-content table>caption .headerlink:before,.rst-content tt.download span:first-child:before,.wy-dropdown .caret:before,.wy-inline-validate.wy-inline-validate-danger .wy-input-context:before,.wy-inline-validate.wy-inline-validate-info .wy-input-context:before,.wy-inline-validate.wy-inline-validate-success .wy-input-context:before,.wy-inline-validate.wy-inline-validate-warning .wy-input-context:before,.wy-menu-vertical li.current>a button.toctree-expand:before,.wy-menu-vertical li.on a button.toctree-expand:before,.wy-menu-vertical li 
button.toctree-expand:before{font-family:FontAwesome;display:inline-block;font-style:normal;font-weight:400;line-height:1;text-decoration:inherit}.rst-content .code-block-caption a .headerlink,.rst-content .eqno a .headerlink,.rst-content a .admonition-title,.rst-content code.download a span:first-child,.rst-content dl dt a .headerlink,.rst-content h1 a .headerlink,.rst-content h2 a .headerlink,.rst-content h3 a .headerlink,.rst-content h4 a .headerlink,.rst-content h5 a .headerlink,.rst-content h6 a .headerlink,.rst-content p.caption a .headerlink,.rst-content p a .headerlink,.rst-content table>caption a .headerlink,.rst-content tt.download a span:first-child,.wy-menu-vertical li.current>a button.toctree-expand,.wy-menu-vertical li.on a button.toctree-expand,.wy-menu-vertical li a button.toctree-expand,a .fa,a .icon,a .rst-content .admonition-title,a .rst-content .code-block-caption .headerlink,a .rst-content .eqno .headerlink,a .rst-content code.download span:first-child,a .rst-content dl dt .headerlink,a .rst-content h1 .headerlink,a .rst-content h2 .headerlink,a .rst-content h3 .headerlink,a .rst-content h4 .headerlink,a .rst-content h5 .headerlink,a .rst-content h6 .headerlink,a .rst-content p.caption .headerlink,a .rst-content p .headerlink,a .rst-content table>caption .headerlink,a .rst-content tt.download span:first-child,a .wy-menu-vertical li button.toctree-expand{display:inline-block;text-decoration:inherit}.btn .fa,.btn .icon,.btn .rst-content .admonition-title,.btn .rst-content .code-block-caption .headerlink,.btn .rst-content .eqno .headerlink,.btn .rst-content code.download span:first-child,.btn .rst-content dl dt .headerlink,.btn .rst-content h1 .headerlink,.btn .rst-content h2 .headerlink,.btn .rst-content h3 .headerlink,.btn .rst-content h4 .headerlink,.btn .rst-content h5 .headerlink,.btn .rst-content h6 .headerlink,.btn .rst-content p .headerlink,.btn .rst-content table>caption .headerlink,.btn .rst-content tt.download span:first-child,.btn .wy-menu-vertical li.current>a button.toctree-expand,.btn .wy-menu-vertical li.on a button.toctree-expand,.btn .wy-menu-vertical li button.toctree-expand,.nav .fa,.nav .icon,.nav .rst-content .admonition-title,.nav .rst-content .code-block-caption .headerlink,.nav .rst-content .eqno .headerlink,.nav .rst-content code.download span:first-child,.nav .rst-content dl dt .headerlink,.nav .rst-content h1 .headerlink,.nav .rst-content h2 .headerlink,.nav .rst-content h3 .headerlink,.nav .rst-content h4 .headerlink,.nav .rst-content h5 .headerlink,.nav .rst-content h6 .headerlink,.nav .rst-content p .headerlink,.nav .rst-content table>caption .headerlink,.nav .rst-content tt.download span:first-child,.nav .wy-menu-vertical li.current>a button.toctree-expand,.nav .wy-menu-vertical li.on a button.toctree-expand,.nav .wy-menu-vertical li button.toctree-expand,.rst-content .btn .admonition-title,.rst-content .code-block-caption .btn .headerlink,.rst-content .code-block-caption .nav .headerlink,.rst-content .eqno .btn .headerlink,.rst-content .eqno .nav .headerlink,.rst-content .nav .admonition-title,.rst-content code.download .btn span:first-child,.rst-content code.download .nav span:first-child,.rst-content dl dt .btn .headerlink,.rst-content dl dt .nav .headerlink,.rst-content h1 .btn .headerlink,.rst-content h1 .nav .headerlink,.rst-content h2 .btn .headerlink,.rst-content h2 .nav .headerlink,.rst-content h3 .btn .headerlink,.rst-content h3 .nav .headerlink,.rst-content h4 .btn .headerlink,.rst-content h4 .nav .headerlink,.rst-content h5 .btn 
.headerlink,.rst-content h5 .nav .headerlink,.rst-content h6 .btn .headerlink,.rst-content h6 .nav .headerlink,.rst-content p .btn .headerlink,.rst-content p .nav .headerlink,.rst-content table>caption .btn .headerlink,.rst-content table>caption .nav .headerlink,.rst-content tt.download .btn span:first-child,.rst-content tt.download .nav span:first-child,.wy-menu-vertical li .btn button.toctree-expand,.wy-menu-vertical li.current>a .btn button.toctree-expand,.wy-menu-vertical li.current>a .nav button.toctree-expand,.wy-menu-vertical li .nav button.toctree-expand,.wy-menu-vertical li.on a .btn button.toctree-expand,.wy-menu-vertical li.on a .nav button.toctree-expand{display:inline}.btn .fa-large.icon,.btn .fa.fa-large,.btn .rst-content .code-block-caption .fa-large.headerlink,.btn .rst-content .eqno .fa-large.headerlink,.btn .rst-content .fa-large.admonition-title,.btn .rst-content code.download span.fa-large:first-child,.btn .rst-content dl dt .fa-large.headerlink,.btn .rst-content h1 .fa-large.headerlink,.btn .rst-content h2 .fa-large.headerlink,.btn .rst-content h3 .fa-large.headerlink,.btn .rst-content h4 .fa-large.headerlink,.btn .rst-content h5 .fa-large.headerlink,.btn .rst-content h6 .fa-large.headerlink,.btn .rst-content p .fa-large.headerlink,.btn .rst-content table>caption .fa-large.headerlink,.btn .rst-content tt.download span.fa-large:first-child,.btn .wy-menu-vertical li button.fa-large.toctree-expand,.nav .fa-large.icon,.nav .fa.fa-large,.nav .rst-content .code-block-caption .fa-large.headerlink,.nav .rst-content .eqno .fa-large.headerlink,.nav .rst-content .fa-large.admonition-title,.nav .rst-content code.download span.fa-large:first-child,.nav .rst-content dl dt .fa-large.headerlink,.nav .rst-content h1 .fa-large.headerlink,.nav .rst-content h2 .fa-large.headerlink,.nav .rst-content h3 .fa-large.headerlink,.nav .rst-content h4 .fa-large.headerlink,.nav .rst-content h5 .fa-large.headerlink,.nav .rst-content h6 .fa-large.headerlink,.nav .rst-content p .fa-large.headerlink,.nav .rst-content table>caption .fa-large.headerlink,.nav .rst-content tt.download span.fa-large:first-child,.nav .wy-menu-vertical li button.fa-large.toctree-expand,.rst-content .btn .fa-large.admonition-title,.rst-content .code-block-caption .btn .fa-large.headerlink,.rst-content .code-block-caption .nav .fa-large.headerlink,.rst-content .eqno .btn .fa-large.headerlink,.rst-content .eqno .nav .fa-large.headerlink,.rst-content .nav .fa-large.admonition-title,.rst-content code.download .btn span.fa-large:first-child,.rst-content code.download .nav span.fa-large:first-child,.rst-content dl dt .btn .fa-large.headerlink,.rst-content dl dt .nav .fa-large.headerlink,.rst-content h1 .btn .fa-large.headerlink,.rst-content h1 .nav .fa-large.headerlink,.rst-content h2 .btn .fa-large.headerlink,.rst-content h2 .nav .fa-large.headerlink,.rst-content h3 .btn .fa-large.headerlink,.rst-content h3 .nav .fa-large.headerlink,.rst-content h4 .btn .fa-large.headerlink,.rst-content h4 .nav .fa-large.headerlink,.rst-content h5 .btn .fa-large.headerlink,.rst-content h5 .nav .fa-large.headerlink,.rst-content h6 .btn .fa-large.headerlink,.rst-content h6 .nav .fa-large.headerlink,.rst-content p .btn .fa-large.headerlink,.rst-content p .nav .fa-large.headerlink,.rst-content table>caption .btn .fa-large.headerlink,.rst-content table>caption .nav .fa-large.headerlink,.rst-content tt.download .btn span.fa-large:first-child,.rst-content tt.download .nav span.fa-large:first-child,.wy-menu-vertical li .btn 
button.fa-large.toctree-expand,.wy-menu-vertical li .nav button.fa-large.toctree-expand{line-height:.9em}.btn .fa-spin.icon,.btn .fa.fa-spin,.btn .rst-content .code-block-caption .fa-spin.headerlink,.btn .rst-content .eqno .fa-spin.headerlink,.btn .rst-content .fa-spin.admonition-title,.btn .rst-content code.download span.fa-spin:first-child,.btn .rst-content dl dt .fa-spin.headerlink,.btn .rst-content h1 .fa-spin.headerlink,.btn .rst-content h2 .fa-spin.headerlink,.btn .rst-content h3 .fa-spin.headerlink,.btn .rst-content h4 .fa-spin.headerlink,.btn .rst-content h5 .fa-spin.headerlink,.btn .rst-content h6 .fa-spin.headerlink,.btn .rst-content p .fa-spin.headerlink,.btn .rst-content table>caption .fa-spin.headerlink,.btn .rst-content tt.download span.fa-spin:first-child,.btn .wy-menu-vertical li button.fa-spin.toctree-expand,.nav .fa-spin.icon,.nav .fa.fa-spin,.nav .rst-content .code-block-caption .fa-spin.headerlink,.nav .rst-content .eqno .fa-spin.headerlink,.nav .rst-content .fa-spin.admonition-title,.nav .rst-content code.download span.fa-spin:first-child,.nav .rst-content dl dt .fa-spin.headerlink,.nav .rst-content h1 .fa-spin.headerlink,.nav .rst-content h2 .fa-spin.headerlink,.nav .rst-content h3 .fa-spin.headerlink,.nav .rst-content h4 .fa-spin.headerlink,.nav .rst-content h5 .fa-spin.headerlink,.nav .rst-content h6 .fa-spin.headerlink,.nav .rst-content p .fa-spin.headerlink,.nav .rst-content table>caption .fa-spin.headerlink,.nav .rst-content tt.download span.fa-spin:first-child,.nav .wy-menu-vertical li button.fa-spin.toctree-expand,.rst-content .btn .fa-spin.admonition-title,.rst-content .code-block-caption .btn .fa-spin.headerlink,.rst-content .code-block-caption .nav .fa-spin.headerlink,.rst-content .eqno .btn .fa-spin.headerlink,.rst-content .eqno .nav .fa-spin.headerlink,.rst-content .nav .fa-spin.admonition-title,.rst-content code.download .btn span.fa-spin:first-child,.rst-content code.download .nav span.fa-spin:first-child,.rst-content dl dt .btn .fa-spin.headerlink,.rst-content dl dt .nav .fa-spin.headerlink,.rst-content h1 .btn .fa-spin.headerlink,.rst-content h1 .nav .fa-spin.headerlink,.rst-content h2 .btn .fa-spin.headerlink,.rst-content h2 .nav .fa-spin.headerlink,.rst-content h3 .btn .fa-spin.headerlink,.rst-content h3 .nav .fa-spin.headerlink,.rst-content h4 .btn .fa-spin.headerlink,.rst-content h4 .nav .fa-spin.headerlink,.rst-content h5 .btn .fa-spin.headerlink,.rst-content h5 .nav .fa-spin.headerlink,.rst-content h6 .btn .fa-spin.headerlink,.rst-content h6 .nav .fa-spin.headerlink,.rst-content p .btn .fa-spin.headerlink,.rst-content p .nav .fa-spin.headerlink,.rst-content table>caption .btn .fa-spin.headerlink,.rst-content table>caption .nav .fa-spin.headerlink,.rst-content tt.download .btn span.fa-spin:first-child,.rst-content tt.download .nav span.fa-spin:first-child,.wy-menu-vertical li .btn button.fa-spin.toctree-expand,.wy-menu-vertical li .nav button.fa-spin.toctree-expand{display:inline-block}.btn.fa:before,.btn.icon:before,.rst-content .btn.admonition-title:before,.rst-content .code-block-caption .btn.headerlink:before,.rst-content .eqno .btn.headerlink:before,.rst-content code.download span.btn:first-child:before,.rst-content dl dt .btn.headerlink:before,.rst-content h1 .btn.headerlink:before,.rst-content h2 .btn.headerlink:before,.rst-content h3 .btn.headerlink:before,.rst-content h4 .btn.headerlink:before,.rst-content h5 .btn.headerlink:before,.rst-content h6 .btn.headerlink:before,.rst-content p .btn.headerlink:before,.rst-content table>caption 
.btn.headerlink:before,.rst-content tt.download span.btn:first-child:before,.wy-menu-vertical li button.btn.toctree-expand:before{opacity:.5;-webkit-transition:opacity .05s ease-in;-moz-transition:opacity .05s ease-in;transition:opacity .05s ease-in}.btn.fa:hover:before,.btn.icon:hover:before,.rst-content .btn.admonition-title:hover:before,.rst-content .code-block-caption .btn.headerlink:hover:before,.rst-content .eqno .btn.headerlink:hover:before,.rst-content code.download span.btn:first-child:hover:before,.rst-content dl dt .btn.headerlink:hover:before,.rst-content h1 .btn.headerlink:hover:before,.rst-content h2 .btn.headerlink:hover:before,.rst-content h3 .btn.headerlink:hover:before,.rst-content h4 .btn.headerlink:hover:before,.rst-content h5 .btn.headerlink:hover:before,.rst-content h6 .btn.headerlink:hover:before,.rst-content p .btn.headerlink:hover:before,.rst-content table>caption .btn.headerlink:hover:before,.rst-content tt.download span.btn:first-child:hover:before,.wy-menu-vertical li button.btn.toctree-expand:hover:before{opacity:1}.btn-mini .fa:before,.btn-mini .icon:before,.btn-mini .rst-content .admonition-title:before,.btn-mini .rst-content .code-block-caption .headerlink:before,.btn-mini .rst-content .eqno .headerlink:before,.btn-mini .rst-content code.download span:first-child:before,.btn-mini .rst-content dl dt .headerlink:before,.btn-mini .rst-content h1 .headerlink:before,.btn-mini .rst-content h2 .headerlink:before,.btn-mini .rst-content h3 .headerlink:before,.btn-mini .rst-content h4 .headerlink:before,.btn-mini .rst-content h5 .headerlink:before,.btn-mini .rst-content h6 .headerlink:before,.btn-mini .rst-content p .headerlink:before,.btn-mini .rst-content table>caption .headerlink:before,.btn-mini .rst-content tt.download span:first-child:before,.btn-mini .wy-menu-vertical li button.toctree-expand:before,.rst-content .btn-mini .admonition-title:before,.rst-content .code-block-caption .btn-mini .headerlink:before,.rst-content .eqno .btn-mini .headerlink:before,.rst-content code.download .btn-mini span:first-child:before,.rst-content dl dt .btn-mini .headerlink:before,.rst-content h1 .btn-mini .headerlink:before,.rst-content h2 .btn-mini .headerlink:before,.rst-content h3 .btn-mini .headerlink:before,.rst-content h4 .btn-mini .headerlink:before,.rst-content h5 .btn-mini .headerlink:before,.rst-content h6 .btn-mini .headerlink:before,.rst-content p .btn-mini .headerlink:before,.rst-content table>caption .btn-mini .headerlink:before,.rst-content tt.download .btn-mini span:first-child:before,.wy-menu-vertical li .btn-mini button.toctree-expand:before{font-size:14px;vertical-align:-15%}.rst-content .admonition,.rst-content .admonition-todo,.rst-content .attention,.rst-content .caution,.rst-content .danger,.rst-content .error,.rst-content .hint,.rst-content .important,.rst-content .note,.rst-content .seealso,.rst-content .tip,.rst-content .warning,.wy-alert{padding:12px;line-height:24px;margin-bottom:24px;background:#e7f2fa}.rst-content .admonition-title,.wy-alert-title{font-weight:700;display:block;color:#fff;background:#6ab0de;padding:6px 12px;margin:-12px -12px 12px}.rst-content .danger,.rst-content .error,.rst-content .wy-alert-danger.admonition,.rst-content .wy-alert-danger.admonition-todo,.rst-content .wy-alert-danger.attention,.rst-content .wy-alert-danger.caution,.rst-content .wy-alert-danger.hint,.rst-content .wy-alert-danger.important,.rst-content .wy-alert-danger.note,.rst-content .wy-alert-danger.seealso,.rst-content .wy-alert-danger.tip,.rst-content 
.wy-alert-danger.warning,.wy-alert.wy-alert-danger{background:#fdf3f2}.rst-content .danger .admonition-title,.rst-content .danger .wy-alert-title,.rst-content .error .admonition-title,.rst-content .error .wy-alert-title,.rst-content .wy-alert-danger.admonition-todo .admonition-title,.rst-content .wy-alert-danger.admonition-todo .wy-alert-title,.rst-content .wy-alert-danger.admonition .admonition-title,.rst-content .wy-alert-danger.admonition .wy-alert-title,.rst-content .wy-alert-danger.attention .admonition-title,.rst-content .wy-alert-danger.attention .wy-alert-title,.rst-content .wy-alert-danger.caution .admonition-title,.rst-content .wy-alert-danger.caution .wy-alert-title,.rst-content .wy-alert-danger.hint .admonition-title,.rst-content .wy-alert-danger.hint .wy-alert-title,.rst-content .wy-alert-danger.important .admonition-title,.rst-content .wy-alert-danger.important .wy-alert-title,.rst-content .wy-alert-danger.note .admonition-title,.rst-content .wy-alert-danger.note .wy-alert-title,.rst-content .wy-alert-danger.seealso .admonition-title,.rst-content .wy-alert-danger.seealso .wy-alert-title,.rst-content .wy-alert-danger.tip .admonition-title,.rst-content .wy-alert-danger.tip .wy-alert-title,.rst-content .wy-alert-danger.warning .admonition-title,.rst-content .wy-alert-danger.warning .wy-alert-title,.rst-content .wy-alert.wy-alert-danger .admonition-title,.wy-alert.wy-alert-danger .rst-content .admonition-title,.wy-alert.wy-alert-danger .wy-alert-title{background:#f29f97}.rst-content .admonition-todo,.rst-content .attention,.rst-content .caution,.rst-content .warning,.rst-content .wy-alert-warning.admonition,.rst-content .wy-alert-warning.danger,.rst-content .wy-alert-warning.error,.rst-content .wy-alert-warning.hint,.rst-content .wy-alert-warning.important,.rst-content .wy-alert-warning.note,.rst-content .wy-alert-warning.seealso,.rst-content .wy-alert-warning.tip,.wy-alert.wy-alert-warning{background:#ffedcc}.rst-content .admonition-todo .admonition-title,.rst-content .admonition-todo .wy-alert-title,.rst-content .attention .admonition-title,.rst-content .attention .wy-alert-title,.rst-content .caution .admonition-title,.rst-content .caution .wy-alert-title,.rst-content .warning .admonition-title,.rst-content .warning .wy-alert-title,.rst-content .wy-alert-warning.admonition .admonition-title,.rst-content .wy-alert-warning.admonition .wy-alert-title,.rst-content .wy-alert-warning.danger .admonition-title,.rst-content .wy-alert-warning.danger .wy-alert-title,.rst-content .wy-alert-warning.error .admonition-title,.rst-content .wy-alert-warning.error .wy-alert-title,.rst-content .wy-alert-warning.hint .admonition-title,.rst-content .wy-alert-warning.hint .wy-alert-title,.rst-content .wy-alert-warning.important .admonition-title,.rst-content .wy-alert-warning.important .wy-alert-title,.rst-content .wy-alert-warning.note .admonition-title,.rst-content .wy-alert-warning.note .wy-alert-title,.rst-content .wy-alert-warning.seealso .admonition-title,.rst-content .wy-alert-warning.seealso .wy-alert-title,.rst-content .wy-alert-warning.tip .admonition-title,.rst-content .wy-alert-warning.tip .wy-alert-title,.rst-content .wy-alert.wy-alert-warning .admonition-title,.wy-alert.wy-alert-warning .rst-content .admonition-title,.wy-alert.wy-alert-warning .wy-alert-title{background:#f0b37e}.rst-content .note,.rst-content .seealso,.rst-content .wy-alert-info.admonition,.rst-content .wy-alert-info.admonition-todo,.rst-content .wy-alert-info.attention,.rst-content .wy-alert-info.caution,.rst-content 
.wy-alert-info.danger,.rst-content .wy-alert-info.error,.rst-content .wy-alert-info.hint,.rst-content .wy-alert-info.important,.rst-content .wy-alert-info.tip,.rst-content .wy-alert-info.warning,.wy-alert.wy-alert-info{background:#e7f2fa}.rst-content .note .admonition-title,.rst-content .note .wy-alert-title,.rst-content .seealso .admonition-title,.rst-content .seealso .wy-alert-title,.rst-content .wy-alert-info.admonition-todo .admonition-title,.rst-content .wy-alert-info.admonition-todo .wy-alert-title,.rst-content .wy-alert-info.admonition .admonition-title,.rst-content .wy-alert-info.admonition .wy-alert-title,.rst-content .wy-alert-info.attention .admonition-title,.rst-content .wy-alert-info.attention .wy-alert-title,.rst-content .wy-alert-info.caution .admonition-title,.rst-content .wy-alert-info.caution .wy-alert-title,.rst-content .wy-alert-info.danger .admonition-title,.rst-content .wy-alert-info.danger .wy-alert-title,.rst-content .wy-alert-info.error .admonition-title,.rst-content .wy-alert-info.error .wy-alert-title,.rst-content .wy-alert-info.hint .admonition-title,.rst-content .wy-alert-info.hint .wy-alert-title,.rst-content .wy-alert-info.important .admonition-title,.rst-content .wy-alert-info.important .wy-alert-title,.rst-content .wy-alert-info.tip .admonition-title,.rst-content .wy-alert-info.tip .wy-alert-title,.rst-content .wy-alert-info.warning .admonition-title,.rst-content .wy-alert-info.warning .wy-alert-title,.rst-content .wy-alert.wy-alert-info .admonition-title,.wy-alert.wy-alert-info .rst-content .admonition-title,.wy-alert.wy-alert-info .wy-alert-title{background:#6ab0de}.rst-content .hint,.rst-content .important,.rst-content .tip,.rst-content .wy-alert-success.admonition,.rst-content .wy-alert-success.admonition-todo,.rst-content .wy-alert-success.attention,.rst-content .wy-alert-success.caution,.rst-content .wy-alert-success.danger,.rst-content .wy-alert-success.error,.rst-content .wy-alert-success.note,.rst-content .wy-alert-success.seealso,.rst-content .wy-alert-success.warning,.wy-alert.wy-alert-success{background:#dbfaf4}.rst-content .hint .admonition-title,.rst-content .hint .wy-alert-title,.rst-content .important .admonition-title,.rst-content .important .wy-alert-title,.rst-content .tip .admonition-title,.rst-content .tip .wy-alert-title,.rst-content .wy-alert-success.admonition-todo .admonition-title,.rst-content .wy-alert-success.admonition-todo .wy-alert-title,.rst-content .wy-alert-success.admonition .admonition-title,.rst-content .wy-alert-success.admonition .wy-alert-title,.rst-content .wy-alert-success.attention .admonition-title,.rst-content .wy-alert-success.attention .wy-alert-title,.rst-content .wy-alert-success.caution .admonition-title,.rst-content .wy-alert-success.caution .wy-alert-title,.rst-content .wy-alert-success.danger .admonition-title,.rst-content .wy-alert-success.danger .wy-alert-title,.rst-content .wy-alert-success.error .admonition-title,.rst-content .wy-alert-success.error .wy-alert-title,.rst-content .wy-alert-success.note .admonition-title,.rst-content .wy-alert-success.note .wy-alert-title,.rst-content .wy-alert-success.seealso .admonition-title,.rst-content .wy-alert-success.seealso .wy-alert-title,.rst-content .wy-alert-success.warning .admonition-title,.rst-content .wy-alert-success.warning .wy-alert-title,.rst-content .wy-alert.wy-alert-success .admonition-title,.wy-alert.wy-alert-success .rst-content .admonition-title,.wy-alert.wy-alert-success .wy-alert-title{background:#1abc9c}.rst-content 
.wy-alert-neutral.admonition,.rst-content .wy-alert-neutral.admonition-todo,.rst-content .wy-alert-neutral.attention,.rst-content .wy-alert-neutral.caution,.rst-content .wy-alert-neutral.danger,.rst-content .wy-alert-neutral.error,.rst-content .wy-alert-neutral.hint,.rst-content .wy-alert-neutral.important,.rst-content .wy-alert-neutral.note,.rst-content .wy-alert-neutral.seealso,.rst-content .wy-alert-neutral.tip,.rst-content .wy-alert-neutral.warning,.wy-alert.wy-alert-neutral{background:#f3f6f6}.rst-content .wy-alert-neutral.admonition-todo .admonition-title,.rst-content .wy-alert-neutral.admonition-todo .wy-alert-title,.rst-content .wy-alert-neutral.admonition .admonition-title,.rst-content .wy-alert-neutral.admonition .wy-alert-title,.rst-content .wy-alert-neutral.attention .admonition-title,.rst-content .wy-alert-neutral.attention .wy-alert-title,.rst-content .wy-alert-neutral.caution .admonition-title,.rst-content .wy-alert-neutral.caution .wy-alert-title,.rst-content .wy-alert-neutral.danger .admonition-title,.rst-content .wy-alert-neutral.danger .wy-alert-title,.rst-content .wy-alert-neutral.error .admonition-title,.rst-content .wy-alert-neutral.error .wy-alert-title,.rst-content .wy-alert-neutral.hint .admonition-title,.rst-content .wy-alert-neutral.hint .wy-alert-title,.rst-content .wy-alert-neutral.important .admonition-title,.rst-content .wy-alert-neutral.important .wy-alert-title,.rst-content .wy-alert-neutral.note .admonition-title,.rst-content .wy-alert-neutral.note .wy-alert-title,.rst-content .wy-alert-neutral.seealso .admonition-title,.rst-content .wy-alert-neutral.seealso .wy-alert-title,.rst-content .wy-alert-neutral.tip .admonition-title,.rst-content .wy-alert-neutral.tip .wy-alert-title,.rst-content .wy-alert-neutral.warning .admonition-title,.rst-content .wy-alert-neutral.warning .wy-alert-title,.rst-content .wy-alert.wy-alert-neutral .admonition-title,.wy-alert.wy-alert-neutral .rst-content .admonition-title,.wy-alert.wy-alert-neutral .wy-alert-title{color:#404040;background:#e1e4e5}.rst-content .wy-alert-neutral.admonition-todo a,.rst-content .wy-alert-neutral.admonition a,.rst-content .wy-alert-neutral.attention a,.rst-content .wy-alert-neutral.caution a,.rst-content .wy-alert-neutral.danger a,.rst-content .wy-alert-neutral.error a,.rst-content .wy-alert-neutral.hint a,.rst-content .wy-alert-neutral.important a,.rst-content .wy-alert-neutral.note a,.rst-content .wy-alert-neutral.seealso a,.rst-content .wy-alert-neutral.tip a,.rst-content .wy-alert-neutral.warning a,.wy-alert.wy-alert-neutral a{color:#2980b9}.rst-content .admonition-todo p:last-child,.rst-content .admonition p:last-child,.rst-content .attention p:last-child,.rst-content .caution p:last-child,.rst-content .danger p:last-child,.rst-content .error p:last-child,.rst-content .hint p:last-child,.rst-content .important p:last-child,.rst-content .note p:last-child,.rst-content .seealso p:last-child,.rst-content .tip p:last-child,.rst-content .warning p:last-child,.wy-alert p:last-child{margin-bottom:0}.wy-tray-container{position:fixed;bottom:0;left:0;z-index:600}.wy-tray-container li{display:block;width:300px;background:transparent;color:#fff;text-align:center;box-shadow:0 5px 5px 0 rgba(0,0,0,.1);padding:0 24px;min-width:20%;opacity:0;height:0;line-height:56px;overflow:hidden;-webkit-transition:all .3s ease-in;-moz-transition:all .3s ease-in;transition:all .3s ease-in}.wy-tray-container li.wy-tray-item-success{background:#27ae60}.wy-tray-container 
li.wy-tray-item-info{background:#2980b9}.wy-tray-container li.wy-tray-item-warning{background:#e67e22}.wy-tray-container li.wy-tray-item-danger{background:#e74c3c}.wy-tray-container li.on{opacity:1;height:56px}@media screen and (max-width:768px){.wy-tray-container{bottom:auto;top:0;width:100%}.wy-tray-container li{width:100%}}button{font-size:100%;margin:0;vertical-align:baseline;*vertical-align:middle;cursor:pointer;line-height:normal;-webkit-appearance:button;*overflow:visible}button::-moz-focus-inner,input::-moz-focus-inner{border:0;padding:0}button[disabled]{cursor:default}.btn{display:inline-block;border-radius:2px;line-height:normal;white-space:nowrap;text-align:center;cursor:pointer;font-size:100%;padding:6px 12px 8px;color:#fff;border:1px solid rgba(0,0,0,.1);background-color:#27ae60;text-decoration:none;font-weight:400;font-family:Lato,proxima-nova,Helvetica Neue,Arial,sans-serif;box-shadow:inset 0 1px 2px -1px hsla(0,0%,100%,.5),inset 0 -2px 0 0 rgba(0,0,0,.1);outline-none:false;vertical-align:middle;*display:inline;zoom:1;-webkit-user-drag:none;-webkit-user-select:none;-moz-user-select:none;-ms-user-select:none;user-select:none;-webkit-transition:all .1s linear;-moz-transition:all .1s linear;transition:all .1s linear}.btn-hover{background:#2e8ece;color:#fff}.btn:hover{background:#2cc36b;color:#fff}.btn:focus{background:#2cc36b;outline:0}.btn:active{box-shadow:inset 0 -1px 0 0 rgba(0,0,0,.05),inset 0 2px 0 0 rgba(0,0,0,.1);padding:8px 12px 6px}.btn:visited{color:#fff}.btn-disabled,.btn-disabled:active,.btn-disabled:focus,.btn-disabled:hover,.btn:disabled{background-image:none;filter:progid:DXImageTransform.Microsoft.gradient(enabled = false);filter:alpha(opacity=40);opacity:.4;cursor:not-allowed;box-shadow:none}.btn::-moz-focus-inner{padding:0;border:0}.btn-small{font-size:80%}.btn-info{background-color:#2980b9!important}.btn-info:hover{background-color:#2e8ece!important}.btn-neutral{background-color:#f3f6f6!important;color:#404040!important}.btn-neutral:hover{background-color:#e5ebeb!important;color:#404040}.btn-neutral:visited{color:#404040!important}.btn-success{background-color:#27ae60!important}.btn-success:hover{background-color:#295!important}.btn-danger{background-color:#e74c3c!important}.btn-danger:hover{background-color:#ea6153!important}.btn-warning{background-color:#e67e22!important}.btn-warning:hover{background-color:#e98b39!important}.btn-invert{background-color:#222}.btn-invert:hover{background-color:#2f2f2f!important}.btn-link{background-color:transparent!important;color:#2980b9;box-shadow:none;border-color:transparent!important}.btn-link:active,.btn-link:hover{background-color:transparent!important;color:#409ad5!important;box-shadow:none}.btn-link:visited{color:#9b59b6}.wy-btn-group .btn,.wy-control .btn{vertical-align:middle}.wy-btn-group{margin-bottom:24px;*zoom:1}.wy-btn-group:after,.wy-btn-group:before{display:table;content:""}.wy-btn-group:after{clear:both}.wy-dropdown{position:relative;display:inline-block}.wy-dropdown-active .wy-dropdown-menu{display:block}.wy-dropdown-menu{position:absolute;left:0;display:none;float:left;top:100%;min-width:100%;background:#fcfcfc;z-index:100;border:1px solid #cfd7dd;box-shadow:0 2px 2px 0 rgba(0,0,0,.1);padding:12px}.wy-dropdown-menu>dd>a{display:block;clear:both;color:#404040;white-space:nowrap;font-size:90%;padding:0 12px;cursor:pointer}.wy-dropdown-menu>dd>a:hover{background:#2980b9;color:#fff}.wy-dropdown-menu>dd.divider{border-top:1px solid #cfd7dd;margin:6px 
0}.wy-dropdown-menu>dd.search{padding-bottom:12px}.wy-dropdown-menu>dd.search input[type=search]{width:100%}.wy-dropdown-menu>dd.call-to-action{background:#e3e3e3;text-transform:uppercase;font-weight:500;font-size:80%}.wy-dropdown-menu>dd.call-to-action:hover{background:#e3e3e3}.wy-dropdown-menu>dd.call-to-action .btn{color:#fff}.wy-dropdown.wy-dropdown-up .wy-dropdown-menu{bottom:100%;top:auto;left:auto;right:0}.wy-dropdown.wy-dropdown-bubble .wy-dropdown-menu{background:#fcfcfc;margin-top:2px}.wy-dropdown.wy-dropdown-bubble .wy-dropdown-menu a{padding:6px 12px}.wy-dropdown.wy-dropdown-bubble .wy-dropdown-menu a:hover{background:#2980b9;color:#fff}.wy-dropdown.wy-dropdown-left .wy-dropdown-menu{right:0;left:auto;text-align:right}.wy-dropdown-arrow:before{content:" ";border-bottom:5px solid #f5f5f5;border-left:5px solid transparent;border-right:5px solid transparent;position:absolute;display:block;top:-4px;left:50%;margin-left:-3px}.wy-dropdown-arrow.wy-dropdown-arrow-left:before{left:11px}.wy-form-stacked select{display:block}.wy-form-aligned .wy-help-inline,.wy-form-aligned input,.wy-form-aligned label,.wy-form-aligned select,.wy-form-aligned textarea{display:inline-block;*display:inline;*zoom:1;vertical-align:middle}.wy-form-aligned .wy-control-group>label{display:inline-block;vertical-align:middle;width:10em;margin:6px 12px 0 0;float:left}.wy-form-aligned .wy-control{float:left}.wy-form-aligned .wy-control label{display:block}.wy-form-aligned .wy-control select{margin-top:6px}fieldset{margin:0}fieldset,legend{border:0;padding:0}legend{width:100%;white-space:normal;margin-bottom:24px;font-size:150%;*margin-left:-7px}label,legend{display:block}label{margin:0 0 .3125em;color:#333;font-size:90%}input,select,textarea{font-size:100%;margin:0;vertical-align:baseline;*vertical-align:middle}.wy-control-group{margin-bottom:24px;max-width:1200px;margin-left:auto;margin-right:auto;*zoom:1}.wy-control-group:after,.wy-control-group:before{display:table;content:""}.wy-control-group:after{clear:both}.wy-control-group.wy-control-group-required>label:after{content:" *";color:#e74c3c}.wy-control-group .wy-form-full,.wy-control-group .wy-form-halves,.wy-control-group .wy-form-thirds{padding-bottom:12px}.wy-control-group .wy-form-full input[type=color],.wy-control-group .wy-form-full input[type=date],.wy-control-group .wy-form-full input[type=datetime-local],.wy-control-group .wy-form-full input[type=datetime],.wy-control-group .wy-form-full input[type=email],.wy-control-group .wy-form-full input[type=month],.wy-control-group .wy-form-full input[type=number],.wy-control-group .wy-form-full input[type=password],.wy-control-group .wy-form-full input[type=search],.wy-control-group .wy-form-full input[type=tel],.wy-control-group .wy-form-full input[type=text],.wy-control-group .wy-form-full input[type=time],.wy-control-group .wy-form-full input[type=url],.wy-control-group .wy-form-full input[type=week],.wy-control-group .wy-form-full select,.wy-control-group .wy-form-halves input[type=color],.wy-control-group .wy-form-halves input[type=date],.wy-control-group .wy-form-halves input[type=datetime-local],.wy-control-group .wy-form-halves input[type=datetime],.wy-control-group .wy-form-halves input[type=email],.wy-control-group .wy-form-halves input[type=month],.wy-control-group .wy-form-halves input[type=number],.wy-control-group .wy-form-halves input[type=password],.wy-control-group .wy-form-halves input[type=search],.wy-control-group .wy-form-halves input[type=tel],.wy-control-group .wy-form-halves 
input[type=text],.wy-control-group .wy-form-halves input[type=time],.wy-control-group .wy-form-halves input[type=url],.wy-control-group .wy-form-halves input[type=week],.wy-control-group .wy-form-halves select,.wy-control-group .wy-form-thirds input[type=color],.wy-control-group .wy-form-thirds input[type=date],.wy-control-group .wy-form-thirds input[type=datetime-local],.wy-control-group .wy-form-thirds input[type=datetime],.wy-control-group .wy-form-thirds input[type=email],.wy-control-group .wy-form-thirds input[type=month],.wy-control-group .wy-form-thirds input[type=number],.wy-control-group .wy-form-thirds input[type=password],.wy-control-group .wy-form-thirds input[type=search],.wy-control-group .wy-form-thirds input[type=tel],.wy-control-group .wy-form-thirds input[type=text],.wy-control-group .wy-form-thirds input[type=time],.wy-control-group .wy-form-thirds input[type=url],.wy-control-group .wy-form-thirds input[type=week],.wy-control-group .wy-form-thirds select{width:100%}.wy-control-group .wy-form-full{float:left;display:block;width:100%;margin-right:0}.wy-control-group .wy-form-full:last-child{margin-right:0}.wy-control-group .wy-form-halves{float:left;display:block;margin-right:2.35765%;width:48.82117%}.wy-control-group .wy-form-halves:last-child,.wy-control-group .wy-form-halves:nth-of-type(2n){margin-right:0}.wy-control-group .wy-form-halves:nth-of-type(odd){clear:left}.wy-control-group .wy-form-thirds{float:left;display:block;margin-right:2.35765%;width:31.76157%}.wy-control-group .wy-form-thirds:last-child,.wy-control-group .wy-form-thirds:nth-of-type(3n){margin-right:0}.wy-control-group .wy-form-thirds:nth-of-type(3n+1){clear:left}.wy-control-group.wy-control-group-no-input .wy-control,.wy-control-no-input{margin:6px 0 0;font-size:90%}.wy-control-no-input{display:inline-block}.wy-control-group.fluid-input input[type=color],.wy-control-group.fluid-input input[type=date],.wy-control-group.fluid-input input[type=datetime-local],.wy-control-group.fluid-input input[type=datetime],.wy-control-group.fluid-input input[type=email],.wy-control-group.fluid-input input[type=month],.wy-control-group.fluid-input input[type=number],.wy-control-group.fluid-input input[type=password],.wy-control-group.fluid-input input[type=search],.wy-control-group.fluid-input input[type=tel],.wy-control-group.fluid-input input[type=text],.wy-control-group.fluid-input input[type=time],.wy-control-group.fluid-input input[type=url],.wy-control-group.fluid-input input[type=week]{width:100%}.wy-form-message-inline{padding-left:.3em;color:#666;font-size:90%}.wy-form-message{display:block;color:#999;font-size:70%;margin-top:.3125em;font-style:italic}.wy-form-message p{font-size:inherit;font-style:italic;margin-bottom:6px}.wy-form-message p:last-child{margin-bottom:0}input{line-height:normal}input[type=button],input[type=reset],input[type=submit]{-webkit-appearance:button;cursor:pointer;font-family:Lato,proxima-nova,Helvetica Neue,Arial,sans-serif;*overflow:visible}input[type=color],input[type=date],input[type=datetime-local],input[type=datetime],input[type=email],input[type=month],input[type=number],input[type=password],input[type=search],input[type=tel],input[type=text],input[type=time],input[type=url],input[type=week]{-webkit-appearance:none;padding:6px;display:inline-block;border:1px solid #ccc;font-size:80%;font-family:Lato,proxima-nova,Helvetica Neue,Arial,sans-serif;box-shadow:inset 0 1px 3px #ddd;border-radius:0;-webkit-transition:border .3s linear;-moz-transition:border .3s linear;transition:border 
.3s linear}input[type=datetime-local]{padding:.34375em .625em}input[disabled]{cursor:default}input[type=checkbox],input[type=radio]{padding:0;margin-right:.3125em;*height:13px;*width:13px}input[type=checkbox],input[type=radio],input[type=search]{-webkit-box-sizing:border-box;-moz-box-sizing:border-box;box-sizing:border-box}input[type=search]::-webkit-search-cancel-button,input[type=search]::-webkit-search-decoration{-webkit-appearance:none}input[type=color]:focus,input[type=date]:focus,input[type=datetime-local]:focus,input[type=datetime]:focus,input[type=email]:focus,input[type=month]:focus,input[type=number]:focus,input[type=password]:focus,input[type=search]:focus,input[type=tel]:focus,input[type=text]:focus,input[type=time]:focus,input[type=url]:focus,input[type=week]:focus{outline:0;outline:thin dotted\9;border-color:#333}input.no-focus:focus{border-color:#ccc!important}input[type=checkbox]:focus,input[type=file]:focus,input[type=radio]:focus{outline:thin dotted #333;outline:1px auto #129fea}input[type=color][disabled],input[type=date][disabled],input[type=datetime-local][disabled],input[type=datetime][disabled],input[type=email][disabled],input[type=month][disabled],input[type=number][disabled],input[type=password][disabled],input[type=search][disabled],input[type=tel][disabled],input[type=text][disabled],input[type=time][disabled],input[type=url][disabled],input[type=week][disabled]{cursor:not-allowed;background-color:#fafafa}input:focus:invalid,select:focus:invalid,textarea:focus:invalid{color:#e74c3c;border:1px solid #e74c3c}input:focus:invalid:focus,select:focus:invalid:focus,textarea:focus:invalid:focus{border-color:#e74c3c}input[type=checkbox]:focus:invalid:focus,input[type=file]:focus:invalid:focus,input[type=radio]:focus:invalid:focus{outline-color:#e74c3c}input.wy-input-large{padding:12px;font-size:100%}textarea{overflow:auto;vertical-align:top;width:100%;font-family:Lato,proxima-nova,Helvetica Neue,Arial,sans-serif}select,textarea{padding:.5em .625em;display:inline-block;border:1px solid #ccc;font-size:80%;box-shadow:inset 0 1px 3px #ddd;-webkit-transition:border .3s linear;-moz-transition:border .3s linear;transition:border .3s linear}select{border:1px solid #ccc;background-color:#fff}select[multiple]{height:auto}select:focus,textarea:focus{outline:0}input[readonly],select[disabled],select[readonly],textarea[disabled],textarea[readonly]{cursor:not-allowed;background-color:#fafafa}input[type=checkbox][disabled],input[type=radio][disabled]{cursor:not-allowed}.wy-checkbox,.wy-radio{margin:6px 0;color:#404040;display:block}.wy-checkbox input,.wy-radio input{vertical-align:baseline}.wy-form-message-inline{display:inline-block;*display:inline;*zoom:1;vertical-align:middle}.wy-input-prefix,.wy-input-suffix{white-space:nowrap;padding:6px}.wy-input-prefix .wy-input-context,.wy-input-suffix .wy-input-context{line-height:27px;padding:0 8px;display:inline-block;font-size:80%;background-color:#f3f6f6;border:1px solid #ccc;color:#999}.wy-input-suffix .wy-input-context{border-left:0}.wy-input-prefix .wy-input-context{border-right:0}.wy-switch{position:relative;display:block;height:24px;margin-top:12px;cursor:pointer}.wy-switch:before{left:0;top:0;width:36px;height:12px;background:#ccc}.wy-switch:after,.wy-switch:before{position:absolute;content:"";display:block;border-radius:4px;-webkit-transition:all .2s ease-in-out;-moz-transition:all .2s ease-in-out;transition:all .2s ease-in-out}.wy-switch:after{width:18px;height:18px;background:#999;left:-3px;top:-3px}.wy-switch 
span{position:absolute;left:48px;display:block;font-size:12px;color:#ccc;line-height:1}.wy-switch.active:before{background:#1e8449}.wy-switch.active:after{left:24px;background:#27ae60}.wy-switch.disabled{cursor:not-allowed;opacity:.8}.wy-control-group.wy-control-group-error .wy-form-message,.wy-control-group.wy-control-group-error>label{color:#e74c3c}.wy-control-group.wy-control-group-error input[type=color],.wy-control-group.wy-control-group-error input[type=date],.wy-control-group.wy-control-group-error input[type=datetime-local],.wy-control-group.wy-control-group-error input[type=datetime],.wy-control-group.wy-control-group-error input[type=email],.wy-control-group.wy-control-group-error input[type=month],.wy-control-group.wy-control-group-error input[type=number],.wy-control-group.wy-control-group-error input[type=password],.wy-control-group.wy-control-group-error input[type=search],.wy-control-group.wy-control-group-error input[type=tel],.wy-control-group.wy-control-group-error input[type=text],.wy-control-group.wy-control-group-error input[type=time],.wy-control-group.wy-control-group-error input[type=url],.wy-control-group.wy-control-group-error input[type=week],.wy-control-group.wy-control-group-error textarea{border:1px solid #e74c3c}.wy-inline-validate{white-space:nowrap}.wy-inline-validate .wy-input-context{padding:.5em .625em;display:inline-block;font-size:80%}.wy-inline-validate.wy-inline-validate-success .wy-input-context{color:#27ae60}.wy-inline-validate.wy-inline-validate-danger .wy-input-context{color:#e74c3c}.wy-inline-validate.wy-inline-validate-warning .wy-input-context{color:#e67e22}.wy-inline-validate.wy-inline-validate-info .wy-input-context{color:#2980b9}.rotate-90{-webkit-transform:rotate(90deg);-moz-transform:rotate(90deg);-ms-transform:rotate(90deg);-o-transform:rotate(90deg);transform:rotate(90deg)}.rotate-180{-webkit-transform:rotate(180deg);-moz-transform:rotate(180deg);-ms-transform:rotate(180deg);-o-transform:rotate(180deg);transform:rotate(180deg)}.rotate-270{-webkit-transform:rotate(270deg);-moz-transform:rotate(270deg);-ms-transform:rotate(270deg);-o-transform:rotate(270deg);transform:rotate(270deg)}.mirror{-webkit-transform:scaleX(-1);-moz-transform:scaleX(-1);-ms-transform:scaleX(-1);-o-transform:scaleX(-1);transform:scaleX(-1)}.mirror.rotate-90{-webkit-transform:scaleX(-1) rotate(90deg);-moz-transform:scaleX(-1) rotate(90deg);-ms-transform:scaleX(-1) rotate(90deg);-o-transform:scaleX(-1) rotate(90deg);transform:scaleX(-1) rotate(90deg)}.mirror.rotate-180{-webkit-transform:scaleX(-1) rotate(180deg);-moz-transform:scaleX(-1) rotate(180deg);-ms-transform:scaleX(-1) rotate(180deg);-o-transform:scaleX(-1) rotate(180deg);transform:scaleX(-1) rotate(180deg)}.mirror.rotate-270{-webkit-transform:scaleX(-1) rotate(270deg);-moz-transform:scaleX(-1) rotate(270deg);-ms-transform:scaleX(-1) rotate(270deg);-o-transform:scaleX(-1) rotate(270deg);transform:scaleX(-1) rotate(270deg)}@media only screen and (max-width:480px){.wy-form button[type=submit]{margin:.7em 0 0}.wy-form input[type=color],.wy-form input[type=date],.wy-form input[type=datetime-local],.wy-form input[type=datetime],.wy-form input[type=email],.wy-form input[type=month],.wy-form input[type=number],.wy-form input[type=password],.wy-form input[type=search],.wy-form input[type=tel],.wy-form input[type=text],.wy-form input[type=time],.wy-form input[type=url],.wy-form input[type=week],.wy-form label{margin-bottom:.3em;display:block}.wy-form input[type=color],.wy-form input[type=date],.wy-form 
input[type=datetime-local],.wy-form input[type=datetime],.wy-form input[type=email],.wy-form input[type=month],.wy-form input[type=number],.wy-form input[type=password],.wy-form input[type=search],.wy-form input[type=tel],.wy-form input[type=time],.wy-form input[type=url],.wy-form input[type=week]{margin-bottom:0}.wy-form-aligned .wy-control-group label{margin-bottom:.3em;text-align:left;display:block;width:100%}.wy-form-aligned .wy-control{margin:1.5em 0 0}.wy-form-message,.wy-form-message-inline,.wy-form .wy-help-inline{display:block;font-size:80%;padding:6px 0}}@media screen and (max-width:768px){.tablet-hide{display:none}}@media screen and (max-width:480px){.mobile-hide{display:none}}.float-left{float:left}.float-right{float:right}.full-width{width:100%}.rst-content table.docutils,.rst-content table.field-list,.wy-table{border-collapse:collapse;border-spacing:0;empty-cells:show;margin-bottom:24px}.rst-content table.docutils caption,.rst-content table.field-list caption,.wy-table caption{color:#000;font:italic 85%/1 arial,sans-serif;padding:1em 0;text-align:center}.rst-content table.docutils td,.rst-content table.docutils th,.rst-content table.field-list td,.rst-content table.field-list th,.wy-table td,.wy-table th{font-size:90%;margin:0;overflow:visible;padding:8px 16px}.rst-content table.docutils td:first-child,.rst-content table.docutils th:first-child,.rst-content table.field-list td:first-child,.rst-content table.field-list th:first-child,.wy-table td:first-child,.wy-table th:first-child{border-left-width:0}.rst-content table.docutils thead,.rst-content table.field-list thead,.wy-table thead{color:#000;text-align:left;vertical-align:bottom;white-space:nowrap}.rst-content table.docutils thead th,.rst-content table.field-list thead th,.wy-table thead th{font-weight:700;border-bottom:2px solid #e1e4e5}.rst-content table.docutils td,.rst-content table.field-list td,.wy-table td{background-color:transparent;vertical-align:middle}.rst-content table.docutils td p,.rst-content table.field-list td p,.wy-table td p{line-height:18px}.rst-content table.docutils td p:last-child,.rst-content table.field-list td p:last-child,.wy-table td p:last-child{margin-bottom:0}.rst-content table.docutils .wy-table-cell-min,.rst-content table.field-list .wy-table-cell-min,.wy-table .wy-table-cell-min{width:1%;padding-right:0}.rst-content table.docutils .wy-table-cell-min input[type=checkbox],.rst-content table.field-list .wy-table-cell-min input[type=checkbox],.wy-table .wy-table-cell-min input[type=checkbox]{margin:0}.wy-table-secondary{color:grey;font-size:90%}.wy-table-tertiary{color:grey;font-size:80%}.rst-content table.docutils:not(.field-list) tr:nth-child(2n-1) td,.wy-table-backed,.wy-table-odd td,.wy-table-striped tr:nth-child(2n-1) td{background-color:#f3f6f6}.rst-content table.docutils,.wy-table-bordered-all{border:1px solid #e1e4e5}.rst-content table.docutils td,.wy-table-bordered-all td{border-bottom:1px solid #e1e4e5;border-left:1px solid #e1e4e5}.rst-content table.docutils tbody>tr:last-child td,.wy-table-bordered-all tbody>tr:last-child td{border-bottom-width:0}.wy-table-bordered{border:1px solid #e1e4e5}.wy-table-bordered-rows td{border-bottom:1px solid #e1e4e5}.wy-table-bordered-rows tbody>tr:last-child td{border-bottom-width:0}.wy-table-horizontal td,.wy-table-horizontal th{border-width:0 0 1px;border-bottom:1px solid #e1e4e5}.wy-table-horizontal tbody>tr:last-child td{border-bottom-width:0}.wy-table-responsive{margin-bottom:24px;max-width:100%;overflow:auto}.wy-table-responsive 
table{margin-bottom:0!important}.wy-table-responsive table td,.wy-table-responsive table th{white-space:nowrap}a{color:#2980b9;text-decoration:none;cursor:pointer}a:hover{color:#3091d1}a:visited{color:#9b59b6}html{height:100%}body,html{overflow-x:hidden}body{font-family:Lato,proxima-nova,Helvetica Neue,Arial,sans-serif;font-weight:400;color:#404040;min-height:100%;background:#edf0f2}.wy-text-left{text-align:left}.wy-text-center{text-align:center}.wy-text-right{text-align:right}.wy-text-large{font-size:120%}.wy-text-normal{font-size:100%}.wy-text-small,small{font-size:80%}.wy-text-strike{text-decoration:line-through}.wy-text-warning{color:#e67e22!important}a.wy-text-warning:hover{color:#eb9950!important}.wy-text-info{color:#2980b9!important}a.wy-text-info:hover{color:#409ad5!important}.wy-text-success{color:#27ae60!important}a.wy-text-success:hover{color:#36d278!important}.wy-text-danger{color:#e74c3c!important}a.wy-text-danger:hover{color:#ed7669!important}.wy-text-neutral{color:#404040!important}a.wy-text-neutral:hover{color:#595959!important}.rst-content .toctree-wrapper>p.caption,h1,h2,h3,h4,h5,h6,legend{margin-top:0;font-weight:700;font-family:Roboto Slab,ff-tisa-web-pro,Georgia,Arial,sans-serif}p{line-height:24px;font-size:16px;margin:0 0 24px}h1{font-size:175%}.rst-content .toctree-wrapper>p.caption,h2{font-size:150%}h3{font-size:125%}h4{font-size:115%}h5{font-size:110%}h6{font-size:100%}hr{display:block;height:1px;border:0;border-top:1px solid #e1e4e5;margin:24px 0;padding:0}.rst-content code,.rst-content tt,code{white-space:nowrap;max-width:100%;background:#fff;border:1px solid #e1e4e5;font-size:75%;padding:0 5px;font-family:SFMono-Regular,Menlo,Monaco,Consolas,Liberation Mono,Courier New,Courier,monospace;color:#e74c3c;overflow-x:auto}.rst-content tt.code-large,code.code-large{font-size:90%}.rst-content .section ul,.rst-content .toctree-wrapper ul,.rst-content section ul,.wy-plain-list-disc,article ul{list-style:disc;line-height:24px;margin-bottom:24px}.rst-content .section ul li,.rst-content .toctree-wrapper ul li,.rst-content section ul li,.wy-plain-list-disc li,article ul li{list-style:disc;margin-left:24px}.rst-content .section ul li p:last-child,.rst-content .section ul li ul,.rst-content .toctree-wrapper ul li p:last-child,.rst-content .toctree-wrapper ul li ul,.rst-content section ul li p:last-child,.rst-content section ul li ul,.wy-plain-list-disc li p:last-child,.wy-plain-list-disc li ul,article ul li p:last-child,article ul li ul{margin-bottom:0}.rst-content .section ul li li,.rst-content .toctree-wrapper ul li li,.rst-content section ul li li,.wy-plain-list-disc li li,article ul li li{list-style:circle}.rst-content .section ul li li li,.rst-content .toctree-wrapper ul li li li,.rst-content section ul li li li,.wy-plain-list-disc li li li,article ul li li li{list-style:square}.rst-content .section ul li ol li,.rst-content .toctree-wrapper ul li ol li,.rst-content section ul li ol li,.wy-plain-list-disc li ol li,article ul li ol li{list-style:decimal}.rst-content .section ol,.rst-content .section ol.arabic,.rst-content .toctree-wrapper ol,.rst-content .toctree-wrapper ol.arabic,.rst-content section ol,.rst-content section ol.arabic,.wy-plain-list-decimal,article ol{list-style:decimal;line-height:24px;margin-bottom:24px}.rst-content .section ol.arabic li,.rst-content .section ol li,.rst-content .toctree-wrapper ol.arabic li,.rst-content .toctree-wrapper ol li,.rst-content section ol.arabic li,.rst-content section ol li,.wy-plain-list-decimal li,article ol 
li{list-style:decimal;margin-left:24px}.rst-content .section ol.arabic li ul,.rst-content .section ol li p:last-child,.rst-content .section ol li ul,.rst-content .toctree-wrapper ol.arabic li ul,.rst-content .toctree-wrapper ol li p:last-child,.rst-content .toctree-wrapper ol li ul,.rst-content section ol.arabic li ul,.rst-content section ol li p:last-child,.rst-content section ol li ul,.wy-plain-list-decimal li p:last-child,.wy-plain-list-decimal li ul,article ol li p:last-child,article ol li ul{margin-bottom:0}.rst-content .section ol.arabic li ul li,.rst-content .section ol li ul li,.rst-content .toctree-wrapper ol.arabic li ul li,.rst-content .toctree-wrapper ol li ul li,.rst-content section ol.arabic li ul li,.rst-content section ol li ul li,.wy-plain-list-decimal li ul li,article ol li ul li{list-style:disc}.wy-breadcrumbs{*zoom:1}.wy-breadcrumbs:after,.wy-breadcrumbs:before{display:table;content:""}.wy-breadcrumbs:after{clear:both}.wy-breadcrumbs>li{display:inline-block;padding-top:5px}.wy-breadcrumbs>li.wy-breadcrumbs-aside{float:right}.rst-content .wy-breadcrumbs>li code,.rst-content .wy-breadcrumbs>li tt,.wy-breadcrumbs>li .rst-content tt,.wy-breadcrumbs>li code{all:inherit;color:inherit}.breadcrumb-item:before{content:"/";color:#bbb;font-size:13px;padding:0 6px 0 3px}.wy-breadcrumbs-extra{margin-bottom:0;color:#b3b3b3;font-size:80%;display:inline-block}@media screen and (max-width:480px){.wy-breadcrumbs-extra,.wy-breadcrumbs li.wy-breadcrumbs-aside{display:none}}@media print{.wy-breadcrumbs li.wy-breadcrumbs-aside{display:none}}html{font-size:16px}.wy-affix{position:fixed;top:1.618em}.wy-menu a:hover{text-decoration:none}.wy-menu-horiz{*zoom:1}.wy-menu-horiz:after,.wy-menu-horiz:before{display:table;content:""}.wy-menu-horiz:after{clear:both}.wy-menu-horiz li,.wy-menu-horiz ul{display:inline-block}.wy-menu-horiz li:hover{background:hsla(0,0%,100%,.1)}.wy-menu-horiz li.divide-left{border-left:1px solid #404040}.wy-menu-horiz li.divide-right{border-right:1px solid #404040}.wy-menu-horiz a{height:32px;display:inline-block;line-height:32px;padding:0 16px}.wy-menu-vertical{width:300px}.wy-menu-vertical header,.wy-menu-vertical p.caption{color:#55a5d9;height:32px;line-height:32px;padding:0 1.618em;margin:12px 0 0;display:block;font-weight:700;text-transform:uppercase;font-size:85%;white-space:nowrap}.wy-menu-vertical ul{margin-bottom:0}.wy-menu-vertical li.divide-top{border-top:1px solid #404040}.wy-menu-vertical li.divide-bottom{border-bottom:1px solid #404040}.wy-menu-vertical li.current{background:#e3e3e3}.wy-menu-vertical li.current a{color:grey;border-right:1px solid #c9c9c9;padding:.4045em 2.427em}.wy-menu-vertical li.current a:hover{background:#d6d6d6}.rst-content .wy-menu-vertical li tt,.wy-menu-vertical li .rst-content tt,.wy-menu-vertical li code{border:none;background:inherit;color:inherit;padding-left:0;padding-right:0}.wy-menu-vertical li button.toctree-expand{display:block;float:left;margin-left:-1.2em;line-height:18px;color:#4d4d4d;border:none;background:none;padding:0}.wy-menu-vertical li.current>a,.wy-menu-vertical li.on a{color:#404040;font-weight:700;position:relative;background:#fcfcfc;border:none;padding:.4045em 1.618em}.wy-menu-vertical li.current>a:hover,.wy-menu-vertical li.on a:hover{background:#fcfcfc}.wy-menu-vertical li.current>a:hover button.toctree-expand,.wy-menu-vertical li.on a:hover button.toctree-expand{color:grey}.wy-menu-vertical li.current>a button.toctree-expand,.wy-menu-vertical li.on a 
button.toctree-expand{display:block;line-height:18px;color:#333}.wy-menu-vertical li.toctree-l1.current>a{border-bottom:1px solid #c9c9c9;border-top:1px solid #c9c9c9}.wy-menu-vertical .toctree-l1.current .toctree-l2>ul,.wy-menu-vertical .toctree-l2.current .toctree-l3>ul,.wy-menu-vertical .toctree-l3.current .toctree-l4>ul,.wy-menu-vertical .toctree-l4.current .toctree-l5>ul,.wy-menu-vertical .toctree-l5.current .toctree-l6>ul,.wy-menu-vertical .toctree-l6.current .toctree-l7>ul,.wy-menu-vertical .toctree-l7.current .toctree-l8>ul,.wy-menu-vertical .toctree-l8.current .toctree-l9>ul,.wy-menu-vertical .toctree-l9.current .toctree-l10>ul,.wy-menu-vertical .toctree-l10.current .toctree-l11>ul{display:none}.wy-menu-vertical .toctree-l1.current .current.toctree-l2>ul,.wy-menu-vertical .toctree-l2.current .current.toctree-l3>ul,.wy-menu-vertical .toctree-l3.current .current.toctree-l4>ul,.wy-menu-vertical .toctree-l4.current .current.toctree-l5>ul,.wy-menu-vertical .toctree-l5.current .current.toctree-l6>ul,.wy-menu-vertical .toctree-l6.current .current.toctree-l7>ul,.wy-menu-vertical .toctree-l7.current .current.toctree-l8>ul,.wy-menu-vertical .toctree-l8.current .current.toctree-l9>ul,.wy-menu-vertical .toctree-l9.current .current.toctree-l10>ul,.wy-menu-vertical .toctree-l10.current .current.toctree-l11>ul{display:block}.wy-menu-vertical li.toctree-l3,.wy-menu-vertical li.toctree-l4{font-size:.9em}.wy-menu-vertical li.toctree-l2 a,.wy-menu-vertical li.toctree-l3 a,.wy-menu-vertical li.toctree-l4 a,.wy-menu-vertical li.toctree-l5 a,.wy-menu-vertical li.toctree-l6 a,.wy-menu-vertical li.toctree-l7 a,.wy-menu-vertical li.toctree-l8 a,.wy-menu-vertical li.toctree-l9 a,.wy-menu-vertical li.toctree-l10 a{color:#404040}.wy-menu-vertical li.toctree-l2 a:hover button.toctree-expand,.wy-menu-vertical li.toctree-l3 a:hover button.toctree-expand,.wy-menu-vertical li.toctree-l4 a:hover button.toctree-expand,.wy-menu-vertical li.toctree-l5 a:hover button.toctree-expand,.wy-menu-vertical li.toctree-l6 a:hover button.toctree-expand,.wy-menu-vertical li.toctree-l7 a:hover button.toctree-expand,.wy-menu-vertical li.toctree-l8 a:hover button.toctree-expand,.wy-menu-vertical li.toctree-l9 a:hover button.toctree-expand,.wy-menu-vertical li.toctree-l10 a:hover button.toctree-expand{color:grey}.wy-menu-vertical li.toctree-l2.current li.toctree-l3>a,.wy-menu-vertical li.toctree-l3.current li.toctree-l4>a,.wy-menu-vertical li.toctree-l4.current li.toctree-l5>a,.wy-menu-vertical li.toctree-l5.current li.toctree-l6>a,.wy-menu-vertical li.toctree-l6.current li.toctree-l7>a,.wy-menu-vertical li.toctree-l7.current li.toctree-l8>a,.wy-menu-vertical li.toctree-l8.current li.toctree-l9>a,.wy-menu-vertical li.toctree-l9.current li.toctree-l10>a,.wy-menu-vertical li.toctree-l10.current li.toctree-l11>a{display:block}.wy-menu-vertical li.toctree-l2.current>a{padding:.4045em 2.427em}.wy-menu-vertical li.toctree-l2.current li.toctree-l3>a{padding:.4045em 1.618em .4045em 4.045em}.wy-menu-vertical li.toctree-l3.current>a{padding:.4045em 4.045em}.wy-menu-vertical li.toctree-l3.current li.toctree-l4>a{padding:.4045em 1.618em .4045em 5.663em}.wy-menu-vertical li.toctree-l4.current>a{padding:.4045em 5.663em}.wy-menu-vertical li.toctree-l4.current li.toctree-l5>a{padding:.4045em 1.618em .4045em 7.281em}.wy-menu-vertical li.toctree-l5.current>a{padding:.4045em 7.281em}.wy-menu-vertical li.toctree-l5.current li.toctree-l6>a{padding:.4045em 1.618em .4045em 8.899em}.wy-menu-vertical li.toctree-l6.current>a{padding:.4045em 
8.899em}.wy-menu-vertical li.toctree-l6.current li.toctree-l7>a{padding:.4045em 1.618em .4045em 10.517em}.wy-menu-vertical li.toctree-l7.current>a{padding:.4045em 10.517em}.wy-menu-vertical li.toctree-l7.current li.toctree-l8>a{padding:.4045em 1.618em .4045em 12.135em}.wy-menu-vertical li.toctree-l8.current>a{padding:.4045em 12.135em}.wy-menu-vertical li.toctree-l8.current li.toctree-l9>a{padding:.4045em 1.618em .4045em 13.753em}.wy-menu-vertical li.toctree-l9.current>a{padding:.4045em 13.753em}.wy-menu-vertical li.toctree-l9.current li.toctree-l10>a{padding:.4045em 1.618em .4045em 15.371em}.wy-menu-vertical li.toctree-l10.current>a{padding:.4045em 15.371em}.wy-menu-vertical li.toctree-l10.current li.toctree-l11>a{padding:.4045em 1.618em .4045em 16.989em}.wy-menu-vertical li.toctree-l2.current>a,.wy-menu-vertical li.toctree-l2.current li.toctree-l3>a{background:#c9c9c9}.wy-menu-vertical li.toctree-l2 button.toctree-expand{color:#a3a3a3}.wy-menu-vertical li.toctree-l3.current>a,.wy-menu-vertical li.toctree-l3.current li.toctree-l4>a{background:#bdbdbd}.wy-menu-vertical li.toctree-l3 button.toctree-expand{color:#969696}.wy-menu-vertical li.current ul{display:block}.wy-menu-vertical li ul{margin-bottom:0;display:none}.wy-menu-vertical li ul li a{margin-bottom:0;color:#d9d9d9;font-weight:400}.wy-menu-vertical a{line-height:18px;padding:.4045em 1.618em;display:block;position:relative;font-size:90%;color:#d9d9d9}.wy-menu-vertical a:hover{background-color:#4e4a4a;cursor:pointer}.wy-menu-vertical a:hover button.toctree-expand{color:#d9d9d9}.wy-menu-vertical a:active{background-color:#2980b9;cursor:pointer;color:#fff}.wy-menu-vertical a:active button.toctree-expand{color:#fff}.wy-side-nav-search{display:block;width:300px;padding:.809em;margin-bottom:.809em;z-index:200;background-color:#2980b9;text-align:center;color:#fcfcfc}.wy-side-nav-search input[type=text]{width:100%;border-radius:50px;padding:6px 12px;border-color:#2472a4}.wy-side-nav-search img{display:block;margin:auto auto .809em;height:45px;width:45px;background-color:#2980b9;padding:5px;border-radius:100%}.wy-side-nav-search .wy-dropdown>a,.wy-side-nav-search>a{color:#fcfcfc;font-size:100%;font-weight:700;display:inline-block;padding:4px 6px;margin-bottom:.809em;max-width:100%}.wy-side-nav-search .wy-dropdown>a:hover,.wy-side-nav-search>a:hover{background:hsla(0,0%,100%,.1)}.wy-side-nav-search .wy-dropdown>a img.logo,.wy-side-nav-search>a img.logo{display:block;margin:0 auto;height:auto;width:auto;border-radius:0;max-width:100%;background:transparent}.wy-side-nav-search .wy-dropdown>a.icon img.logo,.wy-side-nav-search>a.icon img.logo{margin-top:.85em}.wy-side-nav-search>div.version{margin-top:-.4045em;margin-bottom:.809em;font-weight:400;color:hsla(0,0%,100%,.3)}.wy-nav .wy-menu-vertical header{color:#2980b9}.wy-nav .wy-menu-vertical a{color:#b3b3b3}.wy-nav .wy-menu-vertical a:hover{background-color:#2980b9;color:#fff}[data-menu-wrap]{-webkit-transition:all .2s ease-in;-moz-transition:all .2s ease-in;transition:all .2s 
ease-in;position:absolute;opacity:1;width:100%;opacity:0}[data-menu-wrap].move-center{left:0;right:auto;opacity:1}[data-menu-wrap].move-left{right:auto;left:-100%;opacity:0}[data-menu-wrap].move-right{right:-100%;left:auto;opacity:0}.wy-body-for-nav{background:#fcfcfc}.wy-grid-for-nav{position:absolute;width:100%;height:100%}.wy-nav-side{position:fixed;top:0;bottom:0;left:0;padding-bottom:2em;width:300px;overflow-x:hidden;overflow-y:hidden;min-height:100%;color:#9b9b9b;background:#343131;z-index:200}.wy-side-scroll{width:320px;position:relative;overflow-x:hidden;overflow-y:scroll;height:100%}.wy-nav-top{display:none;background:#2980b9;color:#fff;padding:.4045em .809em;position:relative;line-height:50px;text-align:center;font-size:100%;*zoom:1}.wy-nav-top:after,.wy-nav-top:before{display:table;content:""}.wy-nav-top:after{clear:both}.wy-nav-top a{color:#fff;font-weight:700}.wy-nav-top img{margin-right:12px;height:45px;width:45px;background-color:#2980b9;padding:5px;border-radius:100%}.wy-nav-top i{font-size:30px;float:left;cursor:pointer;padding-top:inherit}.wy-nav-content-wrap{margin-left:300px;background:#fcfcfc;min-height:100%}.wy-nav-content{padding:1.618em 3.236em;height:100%;max-width:800px;margin:auto}.wy-body-mask{position:fixed;width:100%;height:100%;background:rgba(0,0,0,.2);display:none;z-index:499}.wy-body-mask.on{display:block}footer{color:grey}footer p{margin-bottom:12px}.rst-content footer span.commit tt,footer span.commit .rst-content tt,footer span.commit code{padding:0;font-family:SFMono-Regular,Menlo,Monaco,Consolas,Liberation Mono,Courier New,Courier,monospace;font-size:1em;background:none;border:none;color:grey}.rst-footer-buttons{*zoom:1}.rst-footer-buttons:after,.rst-footer-buttons:before{width:100%;display:table;content:""}.rst-footer-buttons:after{clear:both}.rst-breadcrumbs-buttons{margin-top:12px;*zoom:1}.rst-breadcrumbs-buttons:after,.rst-breadcrumbs-buttons:before{display:table;content:""}.rst-breadcrumbs-buttons:after{clear:both}#search-results .search li{margin-bottom:24px;border-bottom:1px solid #e1e4e5;padding-bottom:24px}#search-results .search li:first-child{border-top:1px solid #e1e4e5;padding-top:24px}#search-results .search li a{font-size:120%;margin-bottom:12px;display:inline-block}#search-results .context{color:grey;font-size:90%}.genindextable li>ul{margin-left:24px}@media screen and (max-width:768px){.wy-body-for-nav{background:#fcfcfc}.wy-nav-top{display:block}.wy-nav-side{left:-300px}.wy-nav-side.shift{width:85%;left:0}.wy-menu.wy-menu-vertical,.wy-side-nav-search,.wy-side-scroll{width:auto}.wy-nav-content-wrap{margin-left:0}.wy-nav-content-wrap .wy-nav-content{padding:1.618em}.wy-nav-content-wrap.shift{position:fixed;min-width:100%;left:85%;top:0;height:100%;overflow:hidden}}@media screen and (min-width:1100px){.wy-nav-content-wrap{background:rgba(0,0,0,.05)}.wy-nav-content{margin:0;background:#fcfcfc}}@media print{.rst-versions,.wy-nav-side,footer{display:none}.wy-nav-content-wrap{margin-left:0}}.rst-versions{position:fixed;bottom:0;left:0;width:300px;color:#fcfcfc;background:#1f1d1d;font-family:Lato,proxima-nova,Helvetica Neue,Arial,sans-serif;z-index:400}.rst-versions a{color:#2980b9;text-decoration:none}.rst-versions .rst-badge-small{display:none}.rst-versions .rst-current-version{padding:12px;background-color:#272525;display:block;text-align:right;font-size:90%;cursor:pointer;color:#27ae60;*zoom:1}.rst-versions .rst-current-version:after,.rst-versions .rst-current-version:before{display:table;content:""}.rst-versions 
.rst-current-version:after{clear:both}.rst-content .code-block-caption .rst-versions .rst-current-version .headerlink,.rst-content .eqno .rst-versions .rst-current-version .headerlink,.rst-content .rst-versions .rst-current-version .admonition-title,.rst-content code.download .rst-versions .rst-current-version span:first-child,.rst-content dl dt .rst-versions .rst-current-version .headerlink,.rst-content h1 .rst-versions .rst-current-version .headerlink,.rst-content h2 .rst-versions .rst-current-version .headerlink,.rst-content h3 .rst-versions .rst-current-version .headerlink,.rst-content h4 .rst-versions .rst-current-version .headerlink,.rst-content h5 .rst-versions .rst-current-version .headerlink,.rst-content h6 .rst-versions .rst-current-version .headerlink,.rst-content p .rst-versions .rst-current-version .headerlink,.rst-content table>caption .rst-versions .rst-current-version .headerlink,.rst-content tt.download .rst-versions .rst-current-version span:first-child,.rst-versions .rst-current-version .fa,.rst-versions .rst-current-version .icon,.rst-versions .rst-current-version .rst-content .admonition-title,.rst-versions .rst-current-version .rst-content .code-block-caption .headerlink,.rst-versions .rst-current-version .rst-content .eqno .headerlink,.rst-versions .rst-current-version .rst-content code.download span:first-child,.rst-versions .rst-current-version .rst-content dl dt .headerlink,.rst-versions .rst-current-version .rst-content h1 .headerlink,.rst-versions .rst-current-version .rst-content h2 .headerlink,.rst-versions .rst-current-version .rst-content h3 .headerlink,.rst-versions .rst-current-version .rst-content h4 .headerlink,.rst-versions .rst-current-version .rst-content h5 .headerlink,.rst-versions .rst-current-version .rst-content h6 .headerlink,.rst-versions .rst-current-version .rst-content p .headerlink,.rst-versions .rst-current-version .rst-content table>caption .headerlink,.rst-versions .rst-current-version .rst-content tt.download span:first-child,.rst-versions .rst-current-version .wy-menu-vertical li button.toctree-expand,.wy-menu-vertical li .rst-versions .rst-current-version button.toctree-expand{color:#fcfcfc}.rst-versions .rst-current-version .fa-book,.rst-versions .rst-current-version .icon-book{float:left}.rst-versions .rst-current-version.rst-out-of-date{background-color:#e74c3c;color:#fff}.rst-versions .rst-current-version.rst-active-old-version{background-color:#f1c40f;color:#000}.rst-versions.shift-up{height:auto;max-height:100%;overflow-y:scroll}.rst-versions.shift-up .rst-other-versions{display:block}.rst-versions .rst-other-versions{font-size:90%;padding:12px;color:grey;display:none}.rst-versions .rst-other-versions hr{display:block;height:1px;border:0;margin:20px 0;padding:0;border-top:1px solid #413d3d}.rst-versions .rst-other-versions dd{display:inline-block;margin:0}.rst-versions .rst-other-versions dd a{display:inline-block;padding:6px;color:#fcfcfc}.rst-versions.rst-badge{width:auto;bottom:20px;right:20px;left:auto;border:none;max-width:300px;max-height:90%}.rst-versions.rst-badge .fa-book,.rst-versions.rst-badge .icon-book{float:none;line-height:30px}.rst-versions.rst-badge.shift-up .rst-current-version{text-align:right}.rst-versions.rst-badge.shift-up .rst-current-version .fa-book,.rst-versions.rst-badge.shift-up .rst-current-version .icon-book{float:left}.rst-versions.rst-badge>.rst-current-version{width:auto;height:30px;line-height:30px;padding:0 6px;display:block;text-align:center}@media screen and 
(max-width:768px){.rst-versions{width:85%;display:none}.rst-versions.shift{display:block}}.rst-content .toctree-wrapper>p.caption,.rst-content h1,.rst-content h2,.rst-content h3,.rst-content h4,.rst-content h5,.rst-content h6{margin-bottom:24px}.rst-content img{max-width:100%;height:auto}.rst-content div.figure,.rst-content figure{margin-bottom:24px}.rst-content div.figure .caption-text,.rst-content figure .caption-text{font-style:italic}.rst-content div.figure p:last-child.caption,.rst-content figure p:last-child.caption{margin-bottom:0}.rst-content div.figure.align-center,.rst-content figure.align-center{text-align:center}.rst-content .section>a>img,.rst-content .section>img,.rst-content section>a>img,.rst-content section>img{margin-bottom:24px}.rst-content abbr[title]{text-decoration:none}.rst-content.style-external-links a.reference.external:after{font-family:FontAwesome;content:"\f08e";color:#b3b3b3;vertical-align:super;font-size:60%;margin:0 .2em}.rst-content blockquote{margin-left:24px;line-height:24px;margin-bottom:24px}.rst-content pre.literal-block{white-space:pre;margin:0;padding:12px;font-family:SFMono-Regular,Menlo,Monaco,Consolas,Liberation Mono,Courier New,Courier,monospace;display:block;overflow:auto}.rst-content div[class^=highlight],.rst-content pre.literal-block{border:1px solid #e1e4e5;overflow-x:auto;margin:1px 0 24px}.rst-content div[class^=highlight] div[class^=highlight],.rst-content pre.literal-block div[class^=highlight]{padding:0;border:none;margin:0}.rst-content div[class^=highlight] td.code{width:100%}.rst-content .linenodiv pre{border-right:1px solid #e6e9ea;margin:0;padding:12px;font-family:SFMono-Regular,Menlo,Monaco,Consolas,Liberation Mono,Courier New,Courier,monospace;user-select:none;pointer-events:none}.rst-content div[class^=highlight] pre{white-space:pre;margin:0;padding:12px;display:block;overflow:auto}.rst-content div[class^=highlight] pre .hll{display:block;margin:0 -12px;padding:0 12px}.rst-content .linenodiv pre,.rst-content div[class^=highlight] pre,.rst-content pre.literal-block{font-family:SFMono-Regular,Menlo,Monaco,Consolas,Liberation Mono,Courier New,Courier,monospace;font-size:12px;line-height:1.4}.rst-content div.highlight .gp,.rst-content div.highlight span.linenos{user-select:none;pointer-events:none}.rst-content div.highlight span.linenos{display:inline-block;padding-left:0;padding-right:12px;margin-right:12px;border-right:1px solid #e6e9ea}.rst-content .code-block-caption{font-style:italic;font-size:85%;line-height:1;padding:1em 0;text-align:center}@media print{.rst-content .codeblock,.rst-content div[class^=highlight],.rst-content div[class^=highlight] pre{white-space:pre-wrap}}.rst-content .admonition,.rst-content .admonition-todo,.rst-content .attention,.rst-content .caution,.rst-content .danger,.rst-content .error,.rst-content .hint,.rst-content .important,.rst-content .note,.rst-content .seealso,.rst-content .tip,.rst-content .warning{clear:both}.rst-content .admonition-todo .last,.rst-content .admonition-todo>:last-child,.rst-content .admonition .last,.rst-content .admonition>:last-child,.rst-content .attention .last,.rst-content .attention>:last-child,.rst-content .caution .last,.rst-content .caution>:last-child,.rst-content .danger .last,.rst-content .danger>:last-child,.rst-content .error .last,.rst-content .error>:last-child,.rst-content .hint .last,.rst-content .hint>:last-child,.rst-content .important .last,.rst-content .important>:last-child,.rst-content .note .last,.rst-content .note>:last-child,.rst-content .seealso 
.last,.rst-content .seealso>:last-child,.rst-content .tip .last,.rst-content .tip>:last-child,.rst-content .warning .last,.rst-content .warning>:last-child{margin-bottom:0}.rst-content .admonition-title:before{margin-right:4px}.rst-content .admonition table{border-color:rgba(0,0,0,.1)}.rst-content .admonition table td,.rst-content .admonition table th{background:transparent!important;border-color:rgba(0,0,0,.1)!important}.rst-content .section ol.loweralpha,.rst-content .section ol.loweralpha>li,.rst-content .toctree-wrapper ol.loweralpha,.rst-content .toctree-wrapper ol.loweralpha>li,.rst-content section ol.loweralpha,.rst-content section ol.loweralpha>li{list-style:lower-alpha}.rst-content .section ol.upperalpha,.rst-content .section ol.upperalpha>li,.rst-content .toctree-wrapper ol.upperalpha,.rst-content .toctree-wrapper ol.upperalpha>li,.rst-content section ol.upperalpha,.rst-content section ol.upperalpha>li{list-style:upper-alpha}.rst-content .section ol li>*,.rst-content .section ul li>*,.rst-content .toctree-wrapper ol li>*,.rst-content .toctree-wrapper ul li>*,.rst-content section ol li>*,.rst-content section ul li>*{margin-top:12px;margin-bottom:12px}.rst-content .section ol li>:first-child,.rst-content .section ul li>:first-child,.rst-content .toctree-wrapper ol li>:first-child,.rst-content .toctree-wrapper ul li>:first-child,.rst-content section ol li>:first-child,.rst-content section ul li>:first-child{margin-top:0}.rst-content .section ol li>p,.rst-content .section ol li>p:last-child,.rst-content .section ul li>p,.rst-content .section ul li>p:last-child,.rst-content .toctree-wrapper ol li>p,.rst-content .toctree-wrapper ol li>p:last-child,.rst-content .toctree-wrapper ul li>p,.rst-content .toctree-wrapper ul li>p:last-child,.rst-content section ol li>p,.rst-content section ol li>p:last-child,.rst-content section ul li>p,.rst-content section ul li>p:last-child{margin-bottom:12px}.rst-content .section ol li>p:only-child,.rst-content .section ol li>p:only-child:last-child,.rst-content .section ul li>p:only-child,.rst-content .section ul li>p:only-child:last-child,.rst-content .toctree-wrapper ol li>p:only-child,.rst-content .toctree-wrapper ol li>p:only-child:last-child,.rst-content .toctree-wrapper ul li>p:only-child,.rst-content .toctree-wrapper ul li>p:only-child:last-child,.rst-content section ol li>p:only-child,.rst-content section ol li>p:only-child:last-child,.rst-content section ul li>p:only-child,.rst-content section ul li>p:only-child:last-child{margin-bottom:0}.rst-content .section ol li>ol,.rst-content .section ol li>ul,.rst-content .section ul li>ol,.rst-content .section ul li>ul,.rst-content .toctree-wrapper ol li>ol,.rst-content .toctree-wrapper ol li>ul,.rst-content .toctree-wrapper ul li>ol,.rst-content .toctree-wrapper ul li>ul,.rst-content section ol li>ol,.rst-content section ol li>ul,.rst-content section ul li>ol,.rst-content section ul li>ul{margin-bottom:12px}.rst-content .section ol.simple li>*,.rst-content .section ol.simple li ol,.rst-content .section ol.simple li ul,.rst-content .section ul.simple li>*,.rst-content .section ul.simple li ol,.rst-content .section ul.simple li ul,.rst-content .toctree-wrapper ol.simple li>*,.rst-content .toctree-wrapper ol.simple li ol,.rst-content .toctree-wrapper ol.simple li ul,.rst-content .toctree-wrapper ul.simple li>*,.rst-content .toctree-wrapper ul.simple li ol,.rst-content .toctree-wrapper ul.simple li ul,.rst-content section ol.simple li>*,.rst-content section ol.simple li ol,.rst-content section ol.simple li 
ul,.rst-content section ul.simple li>*,.rst-content section ul.simple li ol,.rst-content section ul.simple li ul{margin-top:0;margin-bottom:0}.rst-content .line-block{margin-left:0;margin-bottom:24px;line-height:24px}.rst-content .line-block .line-block{margin-left:24px;margin-bottom:0}.rst-content .topic-title{font-weight:700;margin-bottom:12px}.rst-content .toc-backref{color:#404040}.rst-content .align-right{float:right;margin:0 0 24px 24px}.rst-content .align-left{float:left;margin:0 24px 24px 0}.rst-content .align-center{margin:auto}.rst-content .align-center:not(table){display:block}.rst-content .code-block-caption .headerlink,.rst-content .eqno .headerlink,.rst-content .toctree-wrapper>p.caption .headerlink,.rst-content dl dt .headerlink,.rst-content h1 .headerlink,.rst-content h2 .headerlink,.rst-content h3 .headerlink,.rst-content h4 .headerlink,.rst-content h5 .headerlink,.rst-content h6 .headerlink,.rst-content p.caption .headerlink,.rst-content p .headerlink,.rst-content table>caption .headerlink{opacity:0;font-size:14px;font-family:FontAwesome;margin-left:.5em}.rst-content .code-block-caption .headerlink:focus,.rst-content .code-block-caption:hover .headerlink,.rst-content .eqno .headerlink:focus,.rst-content .eqno:hover .headerlink,.rst-content .toctree-wrapper>p.caption .headerlink:focus,.rst-content .toctree-wrapper>p.caption:hover .headerlink,.rst-content dl dt .headerlink:focus,.rst-content dl dt:hover .headerlink,.rst-content h1 .headerlink:focus,.rst-content h1:hover .headerlink,.rst-content h2 .headerlink:focus,.rst-content h2:hover .headerlink,.rst-content h3 .headerlink:focus,.rst-content h3:hover .headerlink,.rst-content h4 .headerlink:focus,.rst-content h4:hover .headerlink,.rst-content h5 .headerlink:focus,.rst-content h5:hover .headerlink,.rst-content h6 .headerlink:focus,.rst-content h6:hover .headerlink,.rst-content p.caption .headerlink:focus,.rst-content p.caption:hover .headerlink,.rst-content p .headerlink:focus,.rst-content p:hover .headerlink,.rst-content table>caption .headerlink:focus,.rst-content table>caption:hover .headerlink{opacity:1}.rst-content p a{overflow-wrap:anywhere}.rst-content .wy-table td p,.rst-content .wy-table td ul,.rst-content .wy-table th p,.rst-content .wy-table th ul,.rst-content table.docutils td p,.rst-content table.docutils td ul,.rst-content table.docutils th p,.rst-content table.docutils th ul,.rst-content table.field-list td p,.rst-content table.field-list td ul,.rst-content table.field-list th p,.rst-content table.field-list th ul{font-size:inherit}.rst-content .btn:focus{outline:2px solid}.rst-content table>caption .headerlink:after{font-size:12px}.rst-content .centered{text-align:center}.rst-content .sidebar{float:right;width:40%;display:block;margin:0 0 24px 24px;padding:24px;background:#f3f6f6;border:1px solid #e1e4e5}.rst-content .sidebar dl,.rst-content .sidebar p,.rst-content .sidebar ul{font-size:90%}.rst-content .sidebar .last,.rst-content .sidebar>:last-child{margin-bottom:0}.rst-content .sidebar .sidebar-title{display:block;font-family:Roboto Slab,ff-tisa-web-pro,Georgia,Arial,sans-serif;font-weight:700;background:#e1e4e5;padding:6px 12px;margin:-24px -24px 24px;font-size:100%}.rst-content .highlighted{background:#f1c40f;box-shadow:0 0 0 2px #f1c40f;display:inline;font-weight:700}.rst-content .citation-reference,.rst-content .footnote-reference{vertical-align:baseline;position:relative;top:-.4em;line-height:0;font-size:90%}.rst-content .citation-reference>span.fn-bracket,.rst-content 
.footnote-reference>span.fn-bracket{display:none}.rst-content .hlist{width:100%}.rst-content dl dt span.classifier:before{content:" : "}.rst-content dl dt span.classifier-delimiter{display:none!important}html.writer-html4 .rst-content table.docutils.citation,html.writer-html4 .rst-content table.docutils.footnote{background:none;border:none}html.writer-html4 .rst-content table.docutils.citation td,html.writer-html4 .rst-content table.docutils.citation tr,html.writer-html4 .rst-content table.docutils.footnote td,html.writer-html4 .rst-content table.docutils.footnote tr{border:none;background-color:transparent!important;white-space:normal}html.writer-html4 .rst-content table.docutils.citation td.label,html.writer-html4 .rst-content table.docutils.footnote td.label{padding-left:0;padding-right:0;vertical-align:top}html.writer-html5 .rst-content dl.citation,html.writer-html5 .rst-content dl.field-list,html.writer-html5 .rst-content dl.footnote{display:grid;grid-template-columns:auto minmax(80%,95%)}html.writer-html5 .rst-content dl.citation>dt,html.writer-html5 .rst-content dl.field-list>dt,html.writer-html5 .rst-content dl.footnote>dt{display:inline-grid;grid-template-columns:max-content auto}html.writer-html5 .rst-content aside.citation,html.writer-html5 .rst-content aside.footnote,html.writer-html5 .rst-content div.citation{display:grid;grid-template-columns:auto auto minmax(.65rem,auto) minmax(40%,95%)}html.writer-html5 .rst-content aside.citation>span.label,html.writer-html5 .rst-content aside.footnote>span.label,html.writer-html5 .rst-content div.citation>span.label{grid-column-start:1;grid-column-end:2}html.writer-html5 .rst-content aside.citation>span.backrefs,html.writer-html5 .rst-content aside.footnote>span.backrefs,html.writer-html5 .rst-content div.citation>span.backrefs{grid-column-start:2;grid-column-end:3;grid-row-start:1;grid-row-end:3}html.writer-html5 .rst-content aside.citation>p,html.writer-html5 .rst-content aside.footnote>p,html.writer-html5 .rst-content div.citation>p{grid-column-start:4;grid-column-end:5}html.writer-html5 .rst-content dl.citation,html.writer-html5 .rst-content dl.field-list,html.writer-html5 .rst-content dl.footnote{margin-bottom:24px}html.writer-html5 .rst-content dl.citation>dt,html.writer-html5 .rst-content dl.field-list>dt,html.writer-html5 .rst-content dl.footnote>dt{padding-left:1rem}html.writer-html5 .rst-content dl.citation>dd,html.writer-html5 .rst-content dl.citation>dt,html.writer-html5 .rst-content dl.field-list>dd,html.writer-html5 .rst-content dl.field-list>dt,html.writer-html5 .rst-content dl.footnote>dd,html.writer-html5 .rst-content dl.footnote>dt{margin-bottom:0}html.writer-html5 .rst-content dl.citation,html.writer-html5 .rst-content dl.footnote{font-size:.9rem}html.writer-html5 .rst-content dl.citation>dt,html.writer-html5 .rst-content dl.footnote>dt{margin:0 .5rem .5rem 0;line-height:1.2rem;word-break:break-all;font-weight:400}html.writer-html5 .rst-content dl.citation>dt>span.brackets:before,html.writer-html5 .rst-content dl.footnote>dt>span.brackets:before{content:"["}html.writer-html5 .rst-content dl.citation>dt>span.brackets:after,html.writer-html5 .rst-content dl.footnote>dt>span.brackets:after{content:"]"}html.writer-html5 .rst-content dl.citation>dt>span.fn-backref,html.writer-html5 .rst-content dl.footnote>dt>span.fn-backref{text-align:left;font-style:italic;margin-left:.65rem;word-break:break-word;word-spacing:-.1rem;max-width:5rem}html.writer-html5 .rst-content dl.citation>dt>span.fn-backref>a,html.writer-html5 
.rst-content dl.footnote>dt>span.fn-backref>a{word-break:keep-all}html.writer-html5 .rst-content dl.citation>dt>span.fn-backref>a:not(:first-child):before,html.writer-html5 .rst-content dl.footnote>dt>span.fn-backref>a:not(:first-child):before{content:" "}html.writer-html5 .rst-content dl.citation>dd,html.writer-html5 .rst-content dl.footnote>dd{margin:0 0 .5rem;line-height:1.2rem}html.writer-html5 .rst-content dl.citation>dd p,html.writer-html5 .rst-content dl.footnote>dd p{font-size:.9rem}html.writer-html5 .rst-content aside.citation,html.writer-html5 .rst-content aside.footnote,html.writer-html5 .rst-content div.citation{padding-left:1rem;padding-right:1rem;font-size:.9rem;line-height:1.2rem}html.writer-html5 .rst-content aside.citation p,html.writer-html5 .rst-content aside.footnote p,html.writer-html5 .rst-content div.citation p{font-size:.9rem;line-height:1.2rem;margin-bottom:12px}html.writer-html5 .rst-content aside.citation span.backrefs,html.writer-html5 .rst-content aside.footnote span.backrefs,html.writer-html5 .rst-content div.citation span.backrefs{text-align:left;font-style:italic;margin-left:.65rem;word-break:break-word;word-spacing:-.1rem;max-width:5rem}html.writer-html5 .rst-content aside.citation span.backrefs>a,html.writer-html5 .rst-content aside.footnote span.backrefs>a,html.writer-html5 .rst-content div.citation span.backrefs>a{word-break:keep-all}html.writer-html5 .rst-content aside.citation span.backrefs>a:not(:first-child):before,html.writer-html5 .rst-content aside.footnote span.backrefs>a:not(:first-child):before,html.writer-html5 .rst-content div.citation span.backrefs>a:not(:first-child):before{content:" "}html.writer-html5 .rst-content aside.citation span.label,html.writer-html5 .rst-content aside.footnote span.label,html.writer-html5 .rst-content div.citation span.label{line-height:1.2rem}html.writer-html5 .rst-content aside.citation-list,html.writer-html5 .rst-content aside.footnote-list,html.writer-html5 .rst-content div.citation-list{margin-bottom:24px}html.writer-html5 .rst-content dl.option-list kbd{font-size:.9rem}.rst-content table.docutils.footnote,html.writer-html4 .rst-content table.docutils.citation,html.writer-html5 .rst-content aside.footnote,html.writer-html5 .rst-content aside.footnote-list aside.footnote,html.writer-html5 .rst-content div.citation-list>div.citation,html.writer-html5 .rst-content dl.citation,html.writer-html5 .rst-content dl.footnote{color:grey}.rst-content table.docutils.footnote code,.rst-content table.docutils.footnote tt,html.writer-html4 .rst-content table.docutils.citation code,html.writer-html4 .rst-content table.docutils.citation tt,html.writer-html5 .rst-content aside.footnote-list aside.footnote code,html.writer-html5 .rst-content aside.footnote-list aside.footnote tt,html.writer-html5 .rst-content aside.footnote code,html.writer-html5 .rst-content aside.footnote tt,html.writer-html5 .rst-content div.citation-list>div.citation code,html.writer-html5 .rst-content div.citation-list>div.citation tt,html.writer-html5 .rst-content dl.citation code,html.writer-html5 .rst-content dl.citation tt,html.writer-html5 .rst-content dl.footnote code,html.writer-html5 .rst-content dl.footnote tt{color:#555}.rst-content .wy-table-responsive.citation,.rst-content .wy-table-responsive.footnote{margin-bottom:0}.rst-content .wy-table-responsive.citation+:not(.citation),.rst-content .wy-table-responsive.footnote+:not(.footnote){margin-top:24px}.rst-content .wy-table-responsive.citation:last-child,.rst-content 
.wy-table-responsive.footnote:last-child{margin-bottom:24px}.rst-content table.docutils th{border-color:#e1e4e5}html.writer-html5 .rst-content table.docutils th{border:1px solid #e1e4e5}html.writer-html5 .rst-content table.docutils td>p,html.writer-html5 .rst-content table.docutils th>p{line-height:1rem;margin-bottom:0;font-size:.9rem}.rst-content table.docutils td .last,.rst-content table.docutils td .last>:last-child{margin-bottom:0}.rst-content table.field-list,.rst-content table.field-list td{border:none}.rst-content table.field-list td p{line-height:inherit}.rst-content table.field-list td>strong{display:inline-block}.rst-content table.field-list .field-name{padding-right:10px;text-align:left;white-space:nowrap}.rst-content table.field-list .field-body{text-align:left}.rst-content code,.rst-content tt{color:#000;font-family:SFMono-Regular,Menlo,Monaco,Consolas,Liberation Mono,Courier New,Courier,monospace;padding:2px 5px}.rst-content code big,.rst-content code em,.rst-content tt big,.rst-content tt em{font-size:100%!important;line-height:normal}.rst-content code.literal,.rst-content tt.literal{color:#e74c3c;white-space:normal}.rst-content code.xref,.rst-content tt.xref,a .rst-content code,a .rst-content tt{font-weight:700;color:#404040;overflow-wrap:normal}.rst-content kbd,.rst-content pre,.rst-content samp{font-family:SFMono-Regular,Menlo,Monaco,Consolas,Liberation Mono,Courier New,Courier,monospace}.rst-content a code,.rst-content a tt{color:#2980b9}.rst-content dl{margin-bottom:24px}.rst-content dl dt{font-weight:700;margin-bottom:12px}.rst-content dl ol,.rst-content dl p,.rst-content dl table,.rst-content dl ul{margin-bottom:12px}.rst-content dl dd{margin:0 0 12px 24px;line-height:24px}.rst-content dl dd>ol:last-child,.rst-content dl dd>p:last-child,.rst-content dl dd>table:last-child,.rst-content dl dd>ul:last-child{margin-bottom:0}html.writer-html4 .rst-content dl:not(.docutils),html.writer-html5 .rst-content dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.citation):not(.glossary):not(.simple){margin-bottom:24px}html.writer-html4 .rst-content dl:not(.docutils)>dt,html.writer-html5 .rst-content dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.citation):not(.glossary):not(.simple)>dt{display:table;margin:6px 0;font-size:90%;line-height:normal;background:#e7f2fa;color:#2980b9;border-top:3px solid #6ab0de;padding:6px;position:relative}html.writer-html4 .rst-content dl:not(.docutils)>dt:before,html.writer-html5 .rst-content dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.citation):not(.glossary):not(.simple)>dt:before{color:#6ab0de}html.writer-html4 .rst-content dl:not(.docutils)>dt .headerlink,html.writer-html5 .rst-content dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.citation):not(.glossary):not(.simple)>dt .headerlink{color:#404040;font-size:100%!important}html.writer-html4 .rst-content dl:not(.docutils) dl:not(.option-list):not(.field-list):not(.footnote):not(.citation):not(.glossary):not(.simple)>dt,html.writer-html5 .rst-content dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.citation):not(.glossary):not(.simple) dl:not(.option-list):not(.field-list):not(.footnote):not(.citation):not(.glossary):not(.simple)>dt{margin-bottom:6px;border:none;border-left:3px solid #ccc;background:#f0f0f0;color:#555}html.writer-html4 .rst-content dl:not(.docutils) dl:not(.option-list):not(.field-list):not(.footnote):not(.citation):not(.glossary):not(.simple)>dt .headerlink,html.writer-html5 .rst-content 
dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.citation):not(.glossary):not(.simple) dl:not(.option-list):not(.field-list):not(.footnote):not(.citation):not(.glossary):not(.simple)>dt .headerlink{color:#404040;font-size:100%!important}html.writer-html4 .rst-content dl:not(.docutils)>dt:first-child,html.writer-html5 .rst-content dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.citation):not(.glossary):not(.simple)>dt:first-child{margin-top:0}html.writer-html4 .rst-content dl:not(.docutils) code.descclassname,html.writer-html4 .rst-content dl:not(.docutils) code.descname,html.writer-html4 .rst-content dl:not(.docutils) tt.descclassname,html.writer-html4 .rst-content dl:not(.docutils) tt.descname,html.writer-html5 .rst-content dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.citation):not(.glossary):not(.simple) code.descclassname,html.writer-html5 .rst-content dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.citation):not(.glossary):not(.simple) code.descname,html.writer-html5 .rst-content dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.citation):not(.glossary):not(.simple) tt.descclassname,html.writer-html5 .rst-content dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.citation):not(.glossary):not(.simple) tt.descname{background-color:transparent;border:none;padding:0;font-size:100%!important}html.writer-html4 .rst-content dl:not(.docutils) code.descname,html.writer-html4 .rst-content dl:not(.docutils) tt.descname,html.writer-html5 .rst-content dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.citation):not(.glossary):not(.simple) code.descname,html.writer-html5 .rst-content dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.citation):not(.glossary):not(.simple) tt.descname{font-weight:700}html.writer-html4 .rst-content dl:not(.docutils) .optional,html.writer-html5 .rst-content dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.citation):not(.glossary):not(.simple) .optional{display:inline-block;padding:0 4px;color:#000;font-weight:700}html.writer-html4 .rst-content dl:not(.docutils) .property,html.writer-html5 .rst-content dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.citation):not(.glossary):not(.simple) .property{display:inline-block;padding-right:8px;max-width:100%}html.writer-html4 .rst-content dl:not(.docutils) .k,html.writer-html5 .rst-content dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.citation):not(.glossary):not(.simple) .k{font-style:italic}html.writer-html4 .rst-content dl:not(.docutils) .descclassname,html.writer-html4 .rst-content dl:not(.docutils) .descname,html.writer-html4 .rst-content dl:not(.docutils) .sig-name,html.writer-html5 .rst-content dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.citation):not(.glossary):not(.simple) .descclassname,html.writer-html5 .rst-content dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.citation):not(.glossary):not(.simple) .descname,html.writer-html5 .rst-content dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.citation):not(.glossary):not(.simple) .sig-name{font-family:SFMono-Regular,Menlo,Monaco,Consolas,Liberation Mono,Courier New,Courier,monospace;color:#000}.rst-content .viewcode-back,.rst-content .viewcode-link{display:inline-block;color:#27ae60;font-size:80%;padding-left:24px}.rst-content .viewcode-back{display:block;float:right}.rst-content p.rubric{margin-bottom:12px;font-weight:700}.rst-content 
code.download,.rst-content tt.download{background:inherit;padding:inherit;font-weight:400;font-family:inherit;font-size:inherit;color:inherit;border:inherit;white-space:inherit}.rst-content code.download span:first-child,.rst-content tt.download span:first-child{-webkit-font-smoothing:subpixel-antialiased}.rst-content code.download span:first-child:before,.rst-content tt.download span:first-child:before{margin-right:4px}.rst-content .guilabel{border:1px solid #7fbbe3;background:#e7f2fa;font-size:80%;font-weight:700;border-radius:4px;padding:2.4px 6px;margin:auto 2px}.rst-content :not(dl.option-list)>:not(dt):not(kbd):not(.kbd)>.kbd,.rst-content :not(dl.option-list)>:not(dt):not(kbd):not(.kbd)>kbd{color:inherit;font-size:80%;background-color:#fff;border:1px solid #a6a6a6;border-radius:4px;box-shadow:0 2px grey;padding:2.4px 6px;margin:auto 0}.rst-content .versionmodified{font-style:italic}@media screen and (max-width:480px){.rst-content .sidebar{width:100%}}span[id*=MathJax-Span]{color:#404040}.math{text-align:center}@font-face{font-family:Lato;src:url(fonts/lato-normal.woff2?bd03a2cc277bbbc338d464e679fe9942) format("woff2"),url(fonts/lato-normal.woff?27bd77b9162d388cb8d4c4217c7c5e2a) format("woff");font-weight:400;font-style:normal;font-display:block}@font-face{font-family:Lato;src:url(fonts/lato-bold.woff2?cccb897485813c7c256901dbca54ecf2) format("woff2"),url(fonts/lato-bold.woff?d878b6c29b10beca227e9eef4246111b) format("woff");font-weight:700;font-style:normal;font-display:block}@font-face{font-family:Lato;src:url(fonts/lato-bold-italic.woff2?0b6bb6725576b072c5d0b02ecdd1900d) format("woff2"),url(fonts/lato-bold-italic.woff?9c7e4e9eb485b4a121c760e61bc3707c) format("woff");font-weight:700;font-style:italic;font-display:block}@font-face{font-family:Lato;src:url(fonts/lato-normal-italic.woff2?4eb103b4d12be57cb1d040ed5e162e9d) format("woff2"),url(fonts/lato-normal-italic.woff?f28f2d6482446544ef1ea1ccc6dd5892) format("woff");font-weight:400;font-style:italic;font-display:block}@font-face{font-family:Roboto Slab;font-style:normal;font-weight:400;src:url(fonts/Roboto-Slab-Regular.woff2?7abf5b8d04d26a2cafea937019bca958) format("woff2"),url(fonts/Roboto-Slab-Regular.woff?c1be9284088d487c5e3ff0a10a92e58c) format("woff");font-display:block}@font-face{font-family:Roboto Slab;font-style:normal;font-weight:700;src:url(fonts/Roboto-Slab-Bold.woff2?9984f4a9bda09be08e83f2506954adbe) format("woff2"),url(fonts/Roboto-Slab-Bold.woff?bed5564a116b05148e3b3bea6fb1162a) format("woff");font-display:block} diff --git a/css/theme_extra.css b/css/theme_extra.css new file mode 100644 index 00000000..ab0631a1 --- /dev/null +++ b/css/theme_extra.css @@ -0,0 +1,197 @@ +/* + * Wrap inline code samples otherwise they shoot of the side and + * can't be read at all. + * + * https://github.com/mkdocs/mkdocs/issues/313 + * https://github.com/mkdocs/mkdocs/issues/233 + * https://github.com/mkdocs/mkdocs/issues/834 + */ +.rst-content code { + white-space: pre-wrap; + word-wrap: break-word; + padding: 2px 5px; +} + +/** + * Make code blocks display as blocks and give them the appropriate + * font size and padding. 
+ * + * https://github.com/mkdocs/mkdocs/issues/855 + * https://github.com/mkdocs/mkdocs/issues/834 + * https://github.com/mkdocs/mkdocs/issues/233 + */ +.rst-content pre code { + white-space: pre; + word-wrap: normal; + display: block; + padding: 12px; + font-size: 12px; +} + +/** + * Fix code colors + * + * https://github.com/mkdocs/mkdocs/issues/2027 + */ +.rst-content code { + color: #E74C3C; +} + +.rst-content pre code { + color: #000; + background: #f8f8f8; +} + +/* + * Fix link colors when the link text is inline code. + * + * https://github.com/mkdocs/mkdocs/issues/718 + */ +a code { + color: #2980B9; +} +a:hover code { + color: #3091d1; +} +a:visited code { + color: #9B59B6; +} + +/* + * The CSS classes from highlight.js seem to clash with the + * ReadTheDocs theme causing some code to be incorrectly made + * bold and italic. + * + * https://github.com/mkdocs/mkdocs/issues/411 + */ +pre .cs, pre .c { + font-weight: inherit; + font-style: inherit; +} + +/* + * Fix some issues with the theme and non-highlighted code + * samples. Without and highlighting styles attached the + * formatting is broken. + * + * https://github.com/mkdocs/mkdocs/issues/319 + */ +.rst-content .no-highlight { + display: block; + padding: 0.5em; + color: #333; +} + + +/* + * Additions specific to the search functionality provided by MkDocs + */ + +.search-results { + margin-top: 23px; +} + +.search-results article { + border-top: 1px solid #E1E4E5; + padding-top: 24px; +} + +.search-results article:first-child { + border-top: none; +} + +form .search-query { + width: 100%; + border-radius: 50px; + padding: 6px 12px; + border-color: #D1D4D5; +} + +/* + * Improve inline code blocks within admonitions. + * + * https://github.com/mkdocs/mkdocs/issues/656 + */ + .rst-content .admonition code { + color: #404040; + border: 1px solid #c7c9cb; + border: 1px solid rgba(0, 0, 0, 0.2); + background: #f8fbfd; + background: rgba(255, 255, 255, 0.7); +} + +/* + * Account for wide tables which go off the side. + * Override borders to avoid weirdness on narrow tables. + * + * https://github.com/mkdocs/mkdocs/issues/834 + * https://github.com/mkdocs/mkdocs/pull/1034 + */ +.rst-content .section .docutils { + width: 100%; + overflow: auto; + display: block; + border: none; +} + +td, th { + border: 1px solid #e1e4e5 !important; + border-collapse: collapse; +} + +/* + * Without the following amendments, the navigation in the theme will be + * slightly cut off. This is due to the fact that the .wy-nav-side has a + * padding-bottom of 2em, which must not necessarily align with the font-size of + * 90 % on the .rst-current-version container, combined with the padding of 12px + * above and below. These amendments fix this in two steps: First, make sure the + * .rst-current-version container has a fixed height of 40px, achieved using + * line-height, and then applying a padding-bottom of 40px to this container. In + * a second step, the items within that container are re-aligned using flexbox. + * + * https://github.com/mkdocs/mkdocs/issues/2012 + */ + .wy-nav-side { + padding-bottom: 40px; +} + +/* For section-index only */ +.wy-menu-vertical .current-section p { + background-color: #e3e3e3; + color: #404040; +} + +/* + * The second step of above amendment: Here we make sure the items are aligned + * correctly within the .rst-current-version container. 
Using flexbox, we + * achieve it in such a way that it will look like the following: + * + * [No repo_name] + * Next >> // On the first page + * << Previous Next >> // On all subsequent pages + * + * [With repo_name] + * Next >> // On the first page + * << Previous Next >> // On all subsequent pages + * + * https://github.com/mkdocs/mkdocs/issues/2012 + */ +.rst-versions .rst-current-version { + padding: 0 12px; + display: flex; + font-size: initial; + justify-content: space-between; + align-items: center; + line-height: 40px; +} + +/* + * Please note that this amendment also involves removing certain inline-styles + * from the file ./mkdocs/themes/readthedocs/versions.html. + * + * https://github.com/mkdocs/mkdocs/issues/2012 + */ +.rst-current-version span { + flex: 1; + text-align: center; +} diff --git a/css/triply.css b/css/triply.css new file mode 100644 index 00000000..d0ba0bfd --- /dev/null +++ b/css/triply.css @@ -0,0 +1,139 @@ +:not(pre) > code { + background: #f8f8f8 !important; + color: #000 !important; + padding: .3em .4em !important; + border-radius: .3em !important; +} + +h1 { + font-size: 190% !important; +} + +h2 { + font-size: 160% !important; +} + +h3 { + font-size: 120% !important; + margin-bottom: 10px !important; +} + +h4 { + font-size: 100% !important; + margin-bottom: 10px !important; +} + + +h5 { + font-size: 85% !important; +} + + + + +ul + pre { + margin-bottom: 2rem; +} + +div.document p ~ ul { + margin-top: 0px; +} + +code { + line-height: 170% !important; +} + +div.mermaid svg { + max-width: 100% !important; +} + +table th, table td { + white-space: normal !important; + padding: 4px; +} + +table { + margin-bottom: 10px; +} + +p { + text-align: justify !important; +} + + +div[itemprop="articleBody"] { + counter-reset: h2; +} + +h2 { counter-reset: h3; } +h3 { counter-reset: h4; } +h4 { counter-reset: h5; } +h5 { counter-reset: h6; } +h1:before, h2:before, h3:before, h4:before, h5:before { + color: #666; +} +h2:before { content: counter(h2)". "; counter-increment: h2; } +h3:before { content: counter(h2) "." counter(h3) ". "; counter-increment: h3; } +h4:before { content: counter(h2) "." counter(h3) "." counter(h4) ". "; counter-increment: h4; } +h5:before { content: counter(h2) "." counter(h3) "." counter(h4) "." counter(h5) ". "; counter-increment: h5; } + +h1#search::before, +#mkdocs-search-results h3::before + { + display: none; +} + +div.toc { + display: none; + position: fixed; + left: 1100px; + top: 0px; + border-left: 2px solid #ddd; + display: table; + font-size: 90%; + padding: 20px; + width: auto; +} + +div.toc li, div.toc ul, div.toc ul li{ + list-style: outside none none !important; +} + +div.toc .toctitle { + display: block; + font-weight: bold; + margin-bottom: 10px; +} +div.toc ul { + margin: 0px !important; +} + +div.toc > ul > li { + margin: 0 !important; +} + + +div.toc ul li { + margin-left: 10px !important; + line-height: 18px; +} + +div.toc a { + padding: 2px; +} +div.toc a:visited { + color: inherit; +} +div.toc a:hover { + color: #55a5d9; +} +div.toc a.active { + background-color: #55a5d9; + color: #fff; +} + +@media screen and (min-width: 1300px) { + div.toc { + display: block; + } +} \ No newline at end of file diff --git a/generics/Graphql/index.html b/generics/Graphql/index.html new file mode 100644 index 00000000..0dc64147 --- /dev/null +++ b/generics/Graphql/index.html @@ -0,0 +1,718 @@ + + + + + + + + + GraphQL implementation information - Triply Documentation + + + + + + + + + + + +
GraphQL implementation

+

Some TriplyDB instances expose a GraphQL endpoint. This endpoint uses information from user-provided SHACL shapes for the schema creation.

+

The goal of this documentation is to inform users about Triply's implementation of the GraphQL endpoint. For more generic information about GraphQL, you can visit graphql.org or other resources. In order to understand this documentation, you have to be familiar with the SHACL language.

+

Note: to avoid confusion, we use the noun object as a synonym for resource, and the term triple object when referring to the third element of a triple.

+

Schema

+

Object types

+

A basic element of the schema is the object type, which represents the type of the resources that you can query.

+
type Book {
+  id:ID!
+  title:[XsdString]!
+}
+
+

This object type corresponds to the shape below:

+
shp:Book
+  a sh:NodeShape;
+  sh:targetClass sdo:Book;
+  sh:property
+    [ sh:path dc:title;
+      sh:datatype xsd:string ].
+
+

Fields

+

Fields in object types, such as title, represent properties of nodes. By default, fields return arrays of values. The only exception is when the property has sh:maxCount 1; in that case the field returns a single value. Thus, for the shape:

+
shp:Book
+  a sh:NodeShape;
+  sh:targetClass sdo:Book;
+  sh:property
+    [ sh:path dc:title;
+      sh:maxCount 1;
+      sh:datatype xsd:string ].
+
+

The object type will be:

+
type Book {
+  id:ID!
+  title:XsdString
+}
+
+

Additionally, following GraphQL best practices, fields may return null, except for:

  • IDs, which represent the IRI of the resource.
  • Lists, but not their elements.
  • Properties that have sh:minCount 1 and sh:maxCount 1.

Thus, for this shape:

+
shp:Book
+  a sh:NodeShape;
+  sh:targetClass sdo:Book;
+  sh:property
+    [ sh:path dc:title;
+      sh:maxCount 1;
+      sh:minCount 1;
+      sh:datatype xsd:string ].
+
+

The corresponding object type is:

+
type Book {
+  id:ID!
+  title:XsdString!
+}
+
+

If the property shape includes an sh:datatype, the field returns values of a GraphQL scalar type (see the example above). On the other hand, if the property shape has an sh:class pointing to a class that:

  • is the sh:targetClass of a node shape, the field returns values of the corresponding object type;
  • is not mentioned as a sh:targetClass in any node shape, the field returns values of type ExternalIri.

+

Therefore, the shapes:

+
shp:Book
+  a sh:NodeShape;
+  sh:targetClass sdo:Book;
+  sh:property
+    [ sh:path sdo:author;
+      sh:class sdo:Person ],
+    [ sh:path sdo:audio;
+      sh:class sdo:AudioObject ].
+
+shp:Person
+  a sh:NodeShape;
+  sh:targetClass sdo:Person;
+  sh:property
+    [ sh:path sdo:name;
+      sh:datatype xsd:string ].
+
+

correspond to the GraphQL types below:

+
type Book {
+  id:ID!
+  author:[Person]!
+  audio:[ExternalIri]!
+}
+
+type Person {
+  id:ID!
+  name:[XsdString]!
+}
+
+

IDs

+

The id field is of type ID, which represents the IRI of each resource. This ID is unique.

+

For example:

+
book:Odyssey
+  a sdo:Book;
+  dc:title "Odyssey".
+
+

The id field of this resource would be https://example.org/book/Odyssey. You can read more about the ID scalar on graphql.org. The use of the id field is also covered later, in the section Global object identification.

+

Naming

+

In order to name the GraphQL types in correspondence with shapes, we follow the conventions below:

  • For object types, we use the sh:targetClass of the node shape.
  • For object type fields, we use the sh:path of the property shape.

+

More specifically, the name comes from the part of the IRI after the last #, or otherwise after the last /, converted from kebab-case to camelCase. For example, a hypothetical class IRI such as https://example.org/def/piece-of-art would yield the name pieceOfArt.

+

Notice that if the selected name is illegal or causes a name collision, we return an error informing the user about the problem, and the offending type or field is ignored.

+

Renaming

+

Shape designers can use custom names by means of a special property: <https://triplydb.com/Triply/GraphQL/def/graphqlName>. More specifically, the designer adds a triple whose predicate is the above-mentioned property, whose triple object is a string literal containing the custom name, and whose subject is:

  • for object types, the class IRI;
  • for fields, the IRI of the property shape.

+

If we wanted to rename using the first example of the section, we would do:

+
shp:Book
+  a sh:NodeShape;
+  sh:targetClass sdo:Book;
+  sh:property
+    [ sh:path dc:title;
+      triply:graphqlName "name"; # Rename the object type field
+      sh:datatype xsd:string ].
+
+sdo:Book
+  triply:graphqlName "PieceOfArt". # Rename the object type.
+
+

Then the corresponding object type would be:

+
type PieceOfArt {
+  id:ID!
+  name:[XsdString]!
+}
+
+

Queries

+

The user can query objects by their unique ID, query objects of a specific type together with their fields (including nested information), and filter the results. Let's look at some important concepts.

+

Global object identification

+

For reasons such as caching, the user should be able to query an object by its unique ID. This is possible through global object identification, using the node(id: ID) query.

+

An example:

+
{
+  node(id: "https://example.org/book/Odyssey") {
+    id
+  }
+}
+
+

For more information on global object identification, see the GraphQL specification.

+

Pagination

+

A simple query would be:

+
{
+  BookConnection {
+    edges {
+      node {
+        id
+        title
+      }
+    }
+  }
+}
+
+

The results would include the IRIs of books together with their titles and would be paginated.

+

In order to paginate through a large number of results, our GraphQL implementation supports cursor-based pagination using connections. For more information, please visit the Relay project's cursor-based connection pagination specification.
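Because the connections follow the Relay specification, the standard connection arguments and pageInfo fields should be available. As a minimal sketch (the argument and field names below come from the Relay specification, not from TriplyDB-specific documentation, so treat them as assumptions), fetching a next page could look like this:

{
  BookConnection(first: 10, after: "cursor-from-previous-page") {
    edges {
      cursor
      node {
        id
        title
      }
    }
    pageInfo {
      hasNextPage
      endCursor
    }
  }
}

Here first limits the page size and after continues from the cursor returned on the previous page.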

+

Filtering

+

When you query for objects, you might want to get back resources based on specific values in certain fields. You can do this by filtering.

+

Simple cases

+

For example, you can query for people with a specific id:

+
{
+  PersonConnection(filter: {id: "https://example.org/person/Homer"}) {
+    edges {
+      node {
+        id
+        name
+      }
+    }
+  }
+}
+
+

Another query would be to search for a person with a specific name:

+
{
+  PersonConnection(filter: {name: {eq: "Homer"}}) {
+    edges {
+      node {
+        id
+        name
+      }
+    }
+  }
+}
+
+

Notice that in the second example there is a new field for filtering, called eq. When we want to filter on a field that returns a scalar, meaning that its value is represented by a literal in linked data, we have to use comparison operators: eq and in for equality, and notEq and notIn for inequality. The operators in and notIn take lists of values.
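For instance, a query using the in operator might look as follows (a sketch that assumes the operator accepts a list of literals, in line with the description above; "Hesiod" is just an example value):

{
  PersonConnection(filter: {name: {in: ["Homer", "Hesiod"]}}) {
    edges {
      node {
        id
        name
      }
    }
  }
}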

+

On the other hand, when we filter based on IDs (or, in linked data terms, based on the IRI), as in the first example, we don't use comparison operators.

+

The only special case is a literal with a language tag and rdf:langString as its datatype. Such a literal is represented as { value: "example-string", language: "en" }, and the corresponding scalar is RdfsLangString. This means that in order to filter using a value of this scalar type, you have to execute the query below:

+
{
+  PersonConnection(filter: {name: {eq: {value: "Odysseus", language: "en"}}}) {
+    edges {
+      node {
+        id
+        name
+      }
+    }
+  }
+}
+
+

Language filtering

+

Additionally, there is support for filtering results based on the language tag.

+

An example is:

+
  • Linked data:
+
person:Odysseus
+  a sdo:Person;
+  sdo:name
+    "Odysseus"@en,
+    "Οδυσσεύς"@gr.
+
+shp:Person
+  a sh:NodeShape;
+  sh:targetClass sdo:Person;
+  sh:property
+    [ sh:path sdo:name;
+      sh:datatype rdf:langString ].
+
+
  • GraphQL query:
+
{
+  PersonConnection {
+    edges {
+      node {
+        id
+        name(language:"gr")
+      }
+    }
+  }
+}
+
+
  • Results:
+
{
+  "data": {
+    "PersonConnection": {
+      "edges": [
+        {
+          "node": {
+            "id": "https://example.org/person/Odysseus",
+            "name": [
+              {
+                "value": "Οδυσσεύς",
+                "language": "gr"
+              }
+            ]
+          }
+        }
+      ]
+    }
+  }
+}
+
+

Our implementation also supports the HTTP Accept-Language syntax for filtering based on a language tag.

+

For example,

+
  • GraphQL query:
+
{
+  PersonConnection {
+    edges {
+      node {
+        id
+        name(language:"gr, en;q=.5")
+      }
+    }
+  }
+}
+
+
  • Results:
+
{
+  "data": {
+    "PersonConnection": {
+      "edges": [
+        {
+          "node": {
+            "id": "https://example.org/person/Odysseus",
+            "name": [
+              {
+                "value": "Οδυσσεύς",
+                "language": "gr"
+              },
+              {
+                "value": "Odysseus",
+                "language": "en"
+              }
+            ]
+          }
+        }
+      ]
+    }
+  }
+}
+
+

If the shape includes the sh:uniqueLang constraint, then the returned result will be a single value instead of an array.

+

Thus, the example becomes:

+
  • Linked data:
+
person:Odysseus
+  a sdo:Person;
+  sdo:name
+    "Odysseus"@en,
+    "Οδυσσεύς"@gr.
+
+shp:Person
+  a sh:NodeShape;
+  sh:targetClass sdo:Person;
+  sh:property
+    [ sh:path sdo:name;
+      sh:uniqueLang true;
+      sh:datatype rdf:langString ].
+
+
  • GraphQL query:
+
{
+  PersonConnection {
+    edges {
+      node {
+        id
+        name(language:"gr, en;q=.5")
+      }
+    }
+  }
+}
+
+
  • Results:
+
{
+  "data": {
+    "PersonConnection": {
+      "edges": [
+        {
+          "node": {
+            "id": "https://example.org/person/Odysseus",
+            "name": {
+              "value": "Οδυσσεύς",
+              "language": "gr"
+            }
+          }
+        }
+      ]
+    }
+  }
+}
+
+

Advanced filtering

+

Furthermore, nested filtering is possible:

+
{
+  BookConnection(
+    filter: {author: {name: {eq: "Homer"}}}
+  ) {
+    edges {
+      node {
+        id
+      }
+    }
+  }
+}
+
+

as well as combining filters:

+
{
+  BookConnection(
+    filter: {author: {name: {eq: "Homer"}}, title: {eq: "Odyssey"}}
+  ) {
+    edges {
+      node {
+        id
+      }
+    }
+  }
+}
+
+

Note: combined filters are evaluated using 'and' logic.

diff --git a/generics/JSON-LD-frames/index.html b/generics/JSON-LD-frames/index.html new file mode 100644 index 00000000..bb8fe92d --- /dev/null +++ b/generics/JSON-LD-frames/index.html @@ -0,0 +1,436 @@ JSON-LD Framing - Triply Documentation

JSON-LD Framing

+

Why JSON-LD Framing?

+

SPARQL Construct and SPARQL Describe queries can return results in the JSON-LD format. Here is an example:

+
[
+  {
+    "@id": "john",
+    "livesIn": { "@id": "amsterdam" }
+  },
+  {
+    "@id": "jane",
+    "livesIn": { "@id": "berlin" }
+  },
+  {
+    "@id": "tim",
+    "livesIn": { "@id": "berlin" }
+  }
+]
+
+

JSON-LD is one of the serialization formats for RDF, and encodes a graph structure. For example, the JSON-LD snippet above encodes the following graph:

+
graph TB
  Tim -- livesIn --> Berlin
  John -- livesIn --> Amsterdam
  Jane -- livesIn --> Berlin
+

The triples in a graph do not have any specific order. In our graph picture, the triple about Tim is mentioned first, but this is arbitrary. A graph is a set of triples, so there is no 'first' or 'last' triple. Similarly, there is no 'primary' or 'secondary' element in a graph structure either. In our graph picture, persons occur on the left-hand side and cities occur on the right-hand side, but the same information could just as well be drawn with the cities on the left and the persons on the right.

+

Most RESTful APIs return data with a specific, often tree-shaped structure. For example:

+
{
+  "amsterdam": {
+    "inhabitants": [
+      "john"
+    ]
+  },
+  "berlin": {
+    "inhabitants": [
+      "jane",
+      "tim"
+    ]
+  }
+}
+
+

JSON-LD Framing is a standard that is used to assign additional structure to JSON-LD. With JSON-LD Framing, we can configure the extra structure that is needed to create RESTful APIs over SPARQL queries.

+

JSON-LD Framing is a deterministic translation from a graph, which is an unordered set of triples in which no node is "first" or "special", into a tree, which has ordered branches and exactly one "root" node. In other words, JSON-LD Framing allows one to force a specific tree layout onto a JSON-LD document. This makes it possible to turn SPARQL queries into REST APIs.

+

The TriplyDB API for saved queries has been equipped with a JSON-LD profiler that can apply a JSON-LD profile to a JSON-LD result, transforming plain JSON-LD into framed JSON. To do this you need two things: a SPARQL Construct query and a JSON-LD frame. When you have both, you can retrieve plain JSON from a SPARQL query. The cURL command for running a saved query with a frame is:

+
curl -X POST [SAVED-QUERY-URL] \
+  -H 'Accept: application/ld+json;profile=http://www.w3.org/ns/json-ld#framed' \
+  -H 'Authorization: Bearer [YOUR_TOKEN]' \
+  -H 'Content-type: application/json' \
+  -d '[YOUR_FRAME]'
+
+

When sending this cURL request, a few things are important. First, the request needs to be a POST request, because only a POST request can accept a frame as its body. Second, the Accept header must contain both the expected returned content-type and the JSON-LD profile, e.g. application/ld+json;profile=http://www.w3.org/ns/json-ld#framed. When querying an internal or private query, you also need to add an authorization token. Finally, the Content-type header, which refers to the content-type of the request body, needs to be application/json, since the frame itself is JSON.
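The same framed request can also be made from code. Below is a minimal TypeScript sketch using the built-in fetch of Node.js 18+; the saved-query URL is the example query used on this page, the frame is a simplified placeholder (the full frame is shown further down), and the token is only needed for internal or private queries.

// Sketch: run a saved SPARQL query with a JSON-LD frame and receive framed JSON.
const savedQueryUrl = 'https://api.triplydb.com/queries/JD/JSON-LD-frame/run'

// Simplified placeholder frame; any valid JSON-LD frame can be sent instead.
const frame = {
  '@context': {ex: 'https://triply.cc/example/'},
  '@type': 'ex:Object',
}

async function runFramedQuery() {
  const response = await fetch(savedQueryUrl, {
    // Must be a POST request, because only a POST request can carry the frame as its body.
    method: 'POST',
    headers: {
      Accept: 'application/ld+json;profile=http://www.w3.org/ns/json-ld#framed',
      'Content-Type': 'application/json',
      // Only required for internal or private queries.
      Authorization: `Bearer ${process.env.TRIPLYDB_TOKEN ?? ''}`,
    },
    body: JSON.stringify(frame),
  })
  console.log(await response.json())
}

runFramedQuery()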

+

The SPARQL Query

+

Let's start with the SPARQL query. A JSON-LD frame query needs a SPARQL Construct query that creates a self-contained RDF graph, populated with the relevant vocabulary and data. This graph, serialized as JSON-LD, is used as input for the RESTful API call. The SPARQL Construct query can be designed with API variables.

+

Do note that API variables inside OPTIONAL clauses can behave a bit differently from regular API variables. This is due to how SPARQL interprets OPTIONALs. If an API variable is only used inside an OPTIONAL, the query can return false positives, because the OPTIONAL does not filter out results that do not match the API variable.

+

Also note that the use of UNIONs can have unexpected effects on the SPARQL query. A UNION can split up the result set: the SPARQL engine first exhausts the top part of the UNION and only then starts on the second part. As a consequence, the first part of the result set can be disconnected from the second part. If the limit is set too small, the result set is split over two different JSON-LD documents, which can result in missing data in the response.

+

Finally, please note that you can set a pageSize of 10 and still receive fewer than 10 results, even though the next page is not empty. This can happen because the limit is applied to the result set of the WHERE clause, not to the Construct clause: two rows of the WHERE clause may be condensed into a single result in the Construct clause. Thus the number of results in the API response can differ from the pageSize.

+

The result is a set of triples according to the query. Saving the SPARQL query results in a saved query. The saved query has an API URL that we can now use in our cURL command. This URL usually starts with api and ends with run.

+

The saved query url of an example query is:

+
https://api.triplydb.com/queries/JD/JSON-LD-frame/run
+
+

You can pass API variables as query string parameters, e.g. ?[queryVariable]=[value].

+

The Frame

+

The SPARQL query alone is not enough to provide the RDF data in a plain JSON serialization. That requires additional syntactic constraints that cannot be expressed in a SPARQL query. The SPARQL query therefore needs a frame that restructures the JSON-LD objects into JSON. The JSON-LD 1.1 standard allows JSON-LD objects to be restructured into JSON with such a frame.

+

A JSON-LD frame consists of two parts: the @context of the response, and the structure of the response. The complete specification of JSON-LD frames can be found online.

+

The @context is the translation of the linked data to the JSON naming. The @context documents all IRIs that occur in the JSON-LD response as key-value pairs, where the key is the name the IRI will take in the REST-API response and the value is the IRI in the JSON-LD response. Most of the time the key-value pairs are one-to-one relations, where one key is mapped to a single string. Sometimes the value is an object. That object contains at least @id, which is the IRI in the JSON-LD response, and it can also contain other modifiers that change the REST-API response. Examples are @type, which defines the datatype of the object value, and @container, which defines the container that the value in the REST-API response is stored in. The context can also hold references to vocabularies or prefixes.

+

The second part of the JSON-LD frame is the structure of the data. The structure defines what the REST-API response will look like. Most of the time the structure starts with @type to denote the type that the root node should have; setting the @type is the most straightforward way of selecting your root node. The structure is built outward from the root node. You can define a leaf node in the structure by adding an opening and closing bracket, as shown in the example. To define a nested node, you first define the key that is an object property in the JSON-LD response pointing to another IRI. From that IRI a nested node is then created, filling in the properties of that node.

+
{
+  "@context": {
+    "addresses": "ex:address",
+    "Address": "ex:Address",
+    "Object": "ex:Object",
+    "street": "ex:street",
+    "number": {
+      "@id": "ex:number",
+      "@type": "xsd:integer"
+    },
+    "labels": {
+      "@id": "ex:label",
+      "@container": "@set"
+    },
+    "ex": "https://triply.cc/example/",
+    "xsd": "http://www.w3.org/2001/XMLSchema#"
+  },
+  "@type": "Object",
+  "labels": {},
+  "addresses": {
+    "street": {},
+    "number": {}
+  }
+}
+
+

Together, the JSON-LD frame and the SPARQL query now produce a REST-API result:

+
curl -X POST https://api.triplydb.com/queries/JD/JSON-LD-frame/run \
+  -H 'Accept: application/ld+json;profile=http://www.w3.org/ns/json-ld#framed' \
+  -H 'Content-type: application/json' \
+  -d '{
+    "@context": {
+      "addresses": "ex:address",
+      "Address": "ex:Address",
+      "Object": "ex:Object",
+      "street": "ex:street",
+      "number": {
+        "@id": "ex:number",
+        "@type": "xsd:integer"
+      },
+      "labels": {
+        "@id": "ex:label",
+        "@container": "@set"
+      },
+      "ex": "https://triply.cc/example/",
+      "xsd": "http://www.w3.org/2001/XMLSchema#"
+    },
+    "@type": "Object",
+    "labels": {},
+    "addresses": {
+      "street": {},
+      "number": {}
+    }
+  }'
+
+

The JSON-LD frame turns the SPARQL results of the query above into a format that can be consumed like a plain RESTful API response.

+

Using SPARQL to create a frame

+

Another way to create a frame is by using the SPARQL editor in TriplyDB.

+

You can access the JSON-LD editor by clicking the three dots next to the SPARQL editor, and then selecting "To JSON-LD frame editor".

+

SPARQL editor

+

Afterwards, the JSON script from above should be added to the JSON-LD Frame editor.

+

Ld-Frame box

+

Running the script results in the following REST-API result:

+

REST-API result

+

This can also be accessed via the generated API Link above the SPARQL editor. Copying and pasting the generated link will direct you to a page where you can view the script.

+

+ + + + + + diff --git a/generics/api-token/index.html b/generics/api-token/index.html new file mode 100644 index 00000000..b32d9bf7 --- /dev/null +++ b/generics/api-token/index.html @@ -0,0 +1,356 @@ + + + + + + + + + API Token - Triply Documentation + + + + + + + + + + + +

API Token

+

Applications (see TriplyDB.js) and pipelines (see TriplyETL) often require access rights to interact with TriplyDB instances. Specifically, reading non-public data and writing any (public or non-public) data requires setting an API token. The token ensures that only users that are specifically authorized for certain datasets are able to access and/or modify those datasets.

+

The following steps must be performed in order to create an API token:

+
1. Log into the web GUI of the TriplyDB server where you have an account and for which you want to obtain special access rights in your application or pipeline.

   Many organizations use their own TriplyDB server. If your organization does not yet have a TriplyDB server, you can also create a free account over at TriplyDB.com.

2. Go to your user settings page. This page is reached by clicking on the user menu in the top-right corner and choosing “User settings”.

3. Go to the “API tokens” tab.

4. Click on “Create token”.

5. Enter a name that describes the purpose of the token. This can be the name of the application or pipeline for which the API token will be used.

   You can use the name to manage the token later. For example, you can later remove tokens for applications that are no longer used. It is good practice to create different API tokens for different applications.

6. Choose the permission level that is sufficient for what you want to do with the API token. Notice that “Management access” is often not needed; “Read access” is sufficient for read-only applications, and “Write access” is sufficient for most pipelines and applications that require write access.

   - Management access: if your application must create or change organization accounts in the TriplyDB server.
   - Write access: if your application must write (meta)data in the TriplyDB server.
   - Read access: if your application must read public and/or private data from the TriplyDB server.

7. Click the “Create” button to create your token. The token (a long sequence of characters) will now appear in a dialog.
+

For security reasons, the token will only be shown once. You can copy the token over to the application where you want to use it.
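Once created, the token is typically passed to your application through an environment variable. A minimal TriplyDB.js sketch (the environment variable, account name, and query name are placeholders) could look like this:

import Client from '@triply/triplydb'

async function run() {
  // The token authenticates this application against the TriplyDB server.
  const client = Client.get({token: process.env.TRIPLYDB_TOKEN})
  // With sufficient access rights, the application can now reach the data it
  // is authorized for, e.g. a saved query under a specific account.
  const account = await client.getAccount('account-name')
  const query = await account.getQuery('name-of-some-query')
  // Use the query as described in the SPARQL pagination documentation.
}
run()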

+ + + + + + + diff --git a/generics/sparql-pagination/index.html b/generics/sparql-pagination/index.html new file mode 100644 index 00000000..188b12ba --- /dev/null +++ b/generics/sparql-pagination/index.html @@ -0,0 +1,384 @@ + + + + + + + + + SPARQL pagination - Triply Documentation + + + + + + + + + + + +

SPARQL Pagination

+

This page explains how to retrieve all results from a SPARQL query using pagination.

+

SPARQL queries can often return more than 10.000 results, but due to limitations the returned result set will only consist of the first 10.000 results. To retrieve more than 10.000 results you can use pagination. TriplyDB supports two methods to retrieve all results from a SPARQL query: pagination with the saved query API, or pagination with TriplyDB.js.

+

Pagination with the saved query API

+

Each TriplyDB instance has a fully RESTful API, which is extended for saved SPARQL queries. The API for saved queries supports two arguments that let a query return paginated result sets: ‘page’ and ‘pageSize’. An example of a paginated saved SPARQL query request looks like this:
https://api.triplydb.com/queries/academy/pokemon-color/run?page=3&pageSize=100

+

The argument ‘page’ corresponds to the requested page. In the example request this is the third page of the paginated SPARQL query, according to the ‘pageSize’. There is no maximum ‘page’ limit, as a SPARQL query could return an arbitrary number of results. When no results can be retrieved for the requested page, an empty page is returned.

+

The argument ‘pageSize’ corresponds to how many results each page contains. The ‘pageSize’ defaults to 100 returned results and has a maximum of 10.000 returned results. The request will return an error when the ‘pageSize’ is set higher than 10.000.

+

The RESTful API for the saved SPARQL queries follows the RFC 8288 standard.

+

The request will return a response body containing the result set and a response header. The response header contains a link header with the relative "next" request, the relative "prev" request, and the relative "first" request. By following the "next" link header request you can chain the pagination and retrieve all results.

+
link:
+    <https://api.triplydb.com/queries/academy/pokemon-color/run?page=4&pageSize=100>; rel="next",
+    <https://api.triplydb.com/queries/academy/pokemon-color/run?page=2&pageSize=100>; rel="prev",
+    <https://api.triplydb.com/queries/academy/pokemon-color/run?page=1&pageSize=100>; rel="first"
+
+
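As an illustration, the TypeScript sketch below chains paginated requests by following the rel="next" target in the link header until no next page is left. It assumes Node.js 18+ (built-in fetch), assumes the default JSON response body is an array of result objects, and uses a deliberately simple link-header parse; the token is only needed for non-public queries.

// Sketch: retrieve all results of a saved query by following rel="next" links.
async function getAllResults(firstPageUrl: string): Promise<unknown[]> {
  const allResults: unknown[] = []
  let url: string | undefined = firstPageUrl
  while (url) {
    const response = await fetch(url, {
      // Only required for non-public queries.
      headers: {Authorization: `Bearer ${process.env.TRIPLYDB_TOKEN ?? ''}`},
    })
    // The default response body is assumed to be a JSON array of results.
    allResults.push(...(await response.json() as unknown[]))
    // Naive extraction of the rel="next" target from the link response header.
    const link = response.headers.get('link') ?? ''
    url = /<([^>]+)>;\s*rel="next"/.exec(link)?.[1]
  }
  return allResults
}

getAllResults('https://api.triplydb.com/queries/academy/pokemon-color/run?page=1&pageSize=100')
  .then(results => console.log(`Retrieved ${results.length} results.`))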

Pagination with TriplyDB.js

+

TriplyDB.js is the official programming library for interacting with TriplyDB. It allows the user to connect to a TriplyDB instance from TypeScript, and it has the advantage that it handles pagination internally, so it can reliably retrieve a large number of results.

+

To get the output for a construct or select query, follow these steps:

+

1. Import the TriplyDB.js library and set your parameters: the TriplyDB instance, the account in which you have saved the query, and the name of the query. Do not forget that TriplyDB.js requests are performed within an async context.

+
import Client from '@triply/triplydb'
+async function run() {
+  // Your code goes here.
+  const client = Client.get({token: process.env.TRIPLYDB_TOKEN})
+  const account = await client.getAccount('account-name')
+  const query = await account.getQuery('name-of-some-query')
+}
+run()
+
+

2. Get the results of a query by setting a results variable. More specifically, for construct queries you use the statements() call:

+
const query = await account.getQuery('name-of-some-query')
+const results = query.results().statements()
+
+

For select queries you use the bindings() call:

+
const query = await account.getQuery('name-of-some-query')
+const results = query.results().bindings()
+
+

Additionally, saved queries can have 'API variables' that allow you to specify variables that are used in the query. Thus, if you have query parameters, pass their values as the first argument to results as follows:

+
// For SPARQL construct queries.
+const results = query.results({
+  someVariable: 'value of someVariable',
+  anotherVariable: 'value of anotherVariable'
+}).statements()
+// For SPARQL select queries.
+const results = query.results({
+  someVariable: 'value of someVariable',
+  anotherVariable: 'value of anotherVariable'
+}).bindings()
+
+

3. To iterate the results of your SPARQL query you have three options:

+

3.1. Iterate through the results per row in a for-loop:

+
// Iterating over the results.
+for await (const row of results) {
+  // execute something
+}
+
+

Note: For select queries the for-loop iterates over the rows of the result set. For construct queries the for-loop iterates over the statements in the result set.

+

3.2. Save the results to a file. This is only supported for SPARQL construct queries:

+
// Saving the results of a SPARQL construct query to a file.
+await results.toFile('my-file.nt')
+
+

3.3. Load all results into memory in the form of an Array. Note that this is almost never needed: if you want to process results, use option 3.1; if you want to persist results, option 3.2 suits better.

+
// Loading results for a SPARQL construct or SPARQL select query into memory.
+const array = await results.toArray()
+
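Putting these steps together, a minimal end-to-end sketch for a SPARQL construct query (the account, query, and file names are placeholders) could look like this:

import Client from '@triply/triplydb'

async function run() {
  const client = Client.get({token: process.env.TRIPLYDB_TOKEN})
  const account = await client.getAccount('account-name')
  const query = await account.getQuery('name-of-some-query')
  // Stream all statements of the construct query (pagination is handled
  // internally by TriplyDB.js) and persist them to a file.
  await query.results().statements().toFile('my-file.nt')
}
run().catch(error => {
  console.error(error)
  process.exit(1)
})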
+ + + + + + + diff --git a/img/favicon.ico b/img/favicon.ico new file mode 100644 index 00000000..e85006a3 Binary files /dev/null and b/img/favicon.ico differ diff --git a/index.html b/index.html new file mode 100644 index 00000000..e327d134 --- /dev/null +++ b/index.html @@ -0,0 +1,348 @@ + + + + + + + + + Triply Documentation + + + + + + + + + + + +

+

Triply Documentation

+

What can we help you with?

+

TriplyDB

+

TriplyDB is a state-of-the-art linked database / triple store that is used by organizations of any size: from start-ups to organizations with 10K+ employees.

+

Learn more about how to use TriplyDB

+

TriplyETL

+

Use TriplyETL to quickly connect your data sources to your linked database / triple store. TriplyETL can extract, transform, enrich, validate, and load linked data.

+

Learn more about how to use TriplyETL

+
+

Didn't find what you were looking for? Contact us via our form or by e-mailing info@triply.cc.

+ + + + + + + + + diff --git a/js/highlight.min.js b/js/highlight.min.js new file mode 100644 index 00000000..6344bf12 --- /dev/null +++ b/js/highlight.min.js @@ -0,0 +1,1243 @@ +/*! + Highlight.js v11.9.0 (git: 2a4acf40ca) + (c) 2006-2023 undefined and other contributors + License: BSD-3-Clause + */ +var hljs=function(){"use strict";function e(n){ +return n instanceof Map?n.clear=n.delete=n.set=()=>{ +throw Error("map is read-only")}:n instanceof Set&&(n.add=n.clear=n.delete=()=>{ +throw Error("set is read-only") +}),Object.freeze(n),Object.getOwnPropertyNames(n).forEach((t=>{ +const a=n[t],i=typeof a;"object"!==i&&"function"!==i||Object.isFrozen(a)||e(a) +})),n}class n{constructor(e){ +void 0===e.data&&(e.data={}),this.data=e.data,this.isMatchIgnored=!1} +ignoreMatch(){this.isMatchIgnored=!0}}function t(e){ +return e.replace(/&/g,"&").replace(//g,">").replace(/"/g,""").replace(/'/g,"'") +}function a(e,...n){const t=Object.create(null);for(const n in e)t[n]=e[n] +;return n.forEach((e=>{for(const n in e)t[n]=e[n]})),t}const i=e=>!!e.scope +;class r{constructor(e,n){ +this.buffer="",this.classPrefix=n.classPrefix,e.walk(this)}addText(e){ +this.buffer+=t(e)}openNode(e){if(!i(e))return;const n=((e,{prefix:n})=>{ +if(e.startsWith("language:"))return e.replace("language:","language-") +;if(e.includes(".")){const t=e.split(".") +;return[`${n}${t.shift()}`,...t.map(((e,n)=>`${e}${"_".repeat(n+1)}`))].join(" ") +}return`${n}${e}`})(e.scope,{prefix:this.classPrefix});this.span(n)} +closeNode(e){i(e)&&(this.buffer+="")}value(){return this.buffer}span(e){ +this.buffer+=``}}const s=(e={})=>{const n={children:[]} +;return Object.assign(n,e),n};class o{constructor(){ +this.rootNode=s(),this.stack=[this.rootNode]}get top(){ +return this.stack[this.stack.length-1]}get root(){return this.rootNode}add(e){ +this.top.children.push(e)}openNode(e){const n=s({scope:e}) +;this.add(n),this.stack.push(n)}closeNode(){ +if(this.stack.length>1)return this.stack.pop()}closeAllNodes(){ +for(;this.closeNode(););}toJSON(){return JSON.stringify(this.rootNode,null,4)} +walk(e){return this.constructor._walk(e,this.rootNode)}static _walk(e,n){ +return"string"==typeof n?e.addText(n):n.children&&(e.openNode(n), +n.children.forEach((n=>this._walk(e,n))),e.closeNode(n)),e}static _collapse(e){ +"string"!=typeof e&&e.children&&(e.children.every((e=>"string"==typeof e))?e.children=[e.children.join("")]:e.children.forEach((e=>{ +o._collapse(e)})))}}class l extends o{constructor(e){super(),this.options=e} +addText(e){""!==e&&this.add(e)}startScope(e){this.openNode(e)}endScope(){ +this.closeNode()}__addSublanguage(e,n){const t=e.root +;n&&(t.scope="language:"+n),this.add(t)}toHTML(){ +return new r(this,this.options).value()}finalize(){ +return this.closeAllNodes(),!0}}function c(e){ +return e?"string"==typeof e?e:e.source:null}function d(e){return b("(?=",e,")")} +function g(e){return b("(?:",e,")*")}function u(e){return b("(?:",e,")?")} +function b(...e){return e.map((e=>c(e))).join("")}function m(...e){const n=(e=>{ +const n=e[e.length-1] +;return"object"==typeof n&&n.constructor===Object?(e.splice(e.length-1,1),n):{} +})(e);return"("+(n.capture?"":"?:")+e.map((e=>c(e))).join("|")+")"} +function p(e){return RegExp(e.toString()+"|").exec("").length-1} +const _=/\[(?:[^\\\]]|\\.)*\]|\(\??|\\([1-9][0-9]*)|\\./ +;function h(e,{joinWith:n}){let t=0;return e.map((e=>{t+=1;const n=t +;let a=c(e),i="";for(;a.length>0;){const e=_.exec(a);if(!e){i+=a;break} +i+=a.substring(0,e.index), 
+a=a.substring(e.index+e[0].length),"\\"===e[0][0]&&e[1]?i+="\\"+(Number(e[1])+n):(i+=e[0], +"("===e[0]&&t++)}return i})).map((e=>`(${e})`)).join(n)} +const f="[a-zA-Z]\\w*",E="[a-zA-Z_]\\w*",y="\\b\\d+(\\.\\d+)?",N="(-?)(\\b0[xX][a-fA-F0-9]+|(\\b\\d+(\\.\\d*)?|\\.\\d+)([eE][-+]?\\d+)?)",w="\\b(0b[01]+)",v={ +begin:"\\\\[\\s\\S]",relevance:0},O={scope:"string",begin:"'",end:"'", +illegal:"\\n",contains:[v]},x={scope:"string",begin:'"',end:'"',illegal:"\\n", +contains:[v]},k=(e,n,t={})=>{const i=a({scope:"comment",begin:e,end:n, +contains:[]},t);i.contains.push({scope:"doctag", +begin:"[ ]*(?=(TODO|FIXME|NOTE|BUG|OPTIMIZE|HACK|XXX):)", +end:/(TODO|FIXME|NOTE|BUG|OPTIMIZE|HACK|XXX):/,excludeBegin:!0,relevance:0}) +;const r=m("I","a","is","so","us","to","at","if","in","it","on",/[A-Za-z]+['](d|ve|re|ll|t|s|n)/,/[A-Za-z]+[-][a-z]+/,/[A-Za-z][a-z]{2,}/) +;return i.contains.push({begin:b(/[ ]+/,"(",r,/[.]?[:]?([.][ ]|[ ])/,"){3}")}),i +},M=k("//","$"),S=k("/\\*","\\*/"),A=k("#","$");var C=Object.freeze({ +__proto__:null,APOS_STRING_MODE:O,BACKSLASH_ESCAPE:v,BINARY_NUMBER_MODE:{ +scope:"number",begin:w,relevance:0},BINARY_NUMBER_RE:w,COMMENT:k, +C_BLOCK_COMMENT_MODE:S,C_LINE_COMMENT_MODE:M,C_NUMBER_MODE:{scope:"number", +begin:N,relevance:0},C_NUMBER_RE:N,END_SAME_AS_BEGIN:e=>Object.assign(e,{ +"on:begin":(e,n)=>{n.data._beginMatch=e[1]},"on:end":(e,n)=>{ +n.data._beginMatch!==e[1]&&n.ignoreMatch()}}),HASH_COMMENT_MODE:A,IDENT_RE:f, +MATCH_NOTHING_RE:/\b\B/,METHOD_GUARD:{begin:"\\.\\s*"+E,relevance:0}, +NUMBER_MODE:{scope:"number",begin:y,relevance:0},NUMBER_RE:y, +PHRASAL_WORDS_MODE:{ +begin:/\b(a|an|the|are|I'm|isn't|don't|doesn't|won't|but|just|should|pretty|simply|enough|gonna|going|wtf|so|such|will|you|your|they|like|more)\b/ +},QUOTE_STRING_MODE:x,REGEXP_MODE:{scope:"regexp",begin:/\/(?=[^/\n]*\/)/, +end:/\/[gimuy]*/,contains:[v,{begin:/\[/,end:/\]/,relevance:0,contains:[v]}]}, +RE_STARTERS_RE:"!|!=|!==|%|%=|&|&&|&=|\\*|\\*=|\\+|\\+=|,|-|-=|/=|/|:|;|<<|<<=|<=|<|===|==|=|>>>=|>>=|>=|>>>|>>|>|\\?|\\[|\\{|\\(|\\^|\\^=|\\||\\|=|\\|\\||~", +SHEBANG:(e={})=>{const n=/^#![ ]*\// +;return e.binary&&(e.begin=b(n,/.*\b/,e.binary,/\b.*/)),a({scope:"meta",begin:n, +end:/$/,relevance:0,"on:begin":(e,n)=>{0!==e.index&&n.ignoreMatch()}},e)}, +TITLE_MODE:{scope:"title",begin:f,relevance:0},UNDERSCORE_IDENT_RE:E, +UNDERSCORE_TITLE_MODE:{scope:"title",begin:E,relevance:0}});function T(e,n){ +"."===e.input[e.index-1]&&n.ignoreMatch()}function R(e,n){ +void 0!==e.className&&(e.scope=e.className,delete e.className)}function I(e,n){ +n&&e.beginKeywords&&(e.begin="\\b("+e.beginKeywords.split(" ").join("|")+")(?!\\.)(?=\\b|\\s)", +e.__beforeBegin=T,e.keywords=e.keywords||e.beginKeywords,delete e.beginKeywords, +void 0===e.relevance&&(e.relevance=0))}function D(e,n){ +Array.isArray(e.illegal)&&(e.illegal=m(...e.illegal))}function L(e,n){ +if(e.match){ +if(e.begin||e.end)throw Error("begin & end are not supported with match") +;e.begin=e.match,delete e.match}}function B(e,n){ +void 0===e.relevance&&(e.relevance=1)}const $=(e,n)=>{if(!e.beforeMatch)return +;if(e.starts)throw Error("beforeMatch cannot be used with starts") +;const t=Object.assign({},e);Object.keys(e).forEach((n=>{delete e[n] +})),e.keywords=t.keywords,e.begin=b(t.beforeMatch,d(t.begin)),e.starts={ +relevance:0,contains:[Object.assign(t,{endsParent:!0})] +},e.relevance=0,delete t.beforeMatch +},z=["of","and","for","in","not","or","if","then","parent","list","value"],F="keyword" +;function U(e,n,t=F){const a=Object.create(null) +;return"string"==typeof 
e?i(t,e.split(" ")):Array.isArray(e)?i(t,e):Object.keys(e).forEach((t=>{ +Object.assign(a,U(e[t],n,t))})),a;function i(e,t){ +n&&(t=t.map((e=>e.toLowerCase()))),t.forEach((n=>{const t=n.split("|") +;a[t[0]]=[e,P(t[0],t[1])]}))}}function P(e,n){ +return n?Number(n):(e=>z.includes(e.toLowerCase()))(e)?0:1}const j={},K=e=>{ +console.error(e)},H=(e,...n)=>{console.log("WARN: "+e,...n)},q=(e,n)=>{ +j[`${e}/${n}`]||(console.log(`Deprecated as of ${e}. ${n}`),j[`${e}/${n}`]=!0) +},G=Error();function Z(e,n,{key:t}){let a=0;const i=e[t],r={},s={} +;for(let e=1;e<=n.length;e++)s[e+a]=i[e],r[e+a]=!0,a+=p(n[e-1]) +;e[t]=s,e[t]._emit=r,e[t]._multi=!0}function W(e){(e=>{ +e.scope&&"object"==typeof e.scope&&null!==e.scope&&(e.beginScope=e.scope, +delete e.scope)})(e),"string"==typeof e.beginScope&&(e.beginScope={ +_wrap:e.beginScope}),"string"==typeof e.endScope&&(e.endScope={_wrap:e.endScope +}),(e=>{if(Array.isArray(e.begin)){ +if(e.skip||e.excludeBegin||e.returnBegin)throw K("skip, excludeBegin, returnBegin not compatible with beginScope: {}"), +G +;if("object"!=typeof e.beginScope||null===e.beginScope)throw K("beginScope must be object"), +G;Z(e,e.begin,{key:"beginScope"}),e.begin=h(e.begin,{joinWith:""})}})(e),(e=>{ +if(Array.isArray(e.end)){ +if(e.skip||e.excludeEnd||e.returnEnd)throw K("skip, excludeEnd, returnEnd not compatible with endScope: {}"), +G +;if("object"!=typeof e.endScope||null===e.endScope)throw K("endScope must be object"), +G;Z(e,e.end,{key:"endScope"}),e.end=h(e.end,{joinWith:""})}})(e)}function Q(e){ +function n(n,t){ +return RegExp(c(n),"m"+(e.case_insensitive?"i":"")+(e.unicodeRegex?"u":"")+(t?"g":"")) +}class t{constructor(){ +this.matchIndexes={},this.regexes=[],this.matchAt=1,this.position=0} +addRule(e,n){ +n.position=this.position++,this.matchIndexes[this.matchAt]=n,this.regexes.push([n,e]), +this.matchAt+=p(e)+1}compile(){0===this.regexes.length&&(this.exec=()=>null) +;const e=this.regexes.map((e=>e[1]));this.matcherRe=n(h(e,{joinWith:"|" +}),!0),this.lastIndex=0}exec(e){this.matcherRe.lastIndex=this.lastIndex +;const n=this.matcherRe.exec(e);if(!n)return null +;const t=n.findIndex(((e,n)=>n>0&&void 0!==e)),a=this.matchIndexes[t] +;return n.splice(0,t),Object.assign(n,a)}}class i{constructor(){ +this.rules=[],this.multiRegexes=[], +this.count=0,this.lastIndex=0,this.regexIndex=0}getMatcher(e){ +if(this.multiRegexes[e])return this.multiRegexes[e];const n=new t +;return this.rules.slice(e).forEach((([e,t])=>n.addRule(e,t))), +n.compile(),this.multiRegexes[e]=n,n}resumingScanAtSamePosition(){ +return 0!==this.regexIndex}considerAll(){this.regexIndex=0}addRule(e,n){ +this.rules.push([e,n]),"begin"===n.type&&this.count++}exec(e){ +const n=this.getMatcher(this.regexIndex);n.lastIndex=this.lastIndex +;let t=n.exec(e) +;if(this.resumingScanAtSamePosition())if(t&&t.index===this.lastIndex);else{ +const n=this.getMatcher(0);n.lastIndex=this.lastIndex+1,t=n.exec(e)} +return t&&(this.regexIndex+=t.position+1, +this.regexIndex===this.count&&this.considerAll()),t}} +if(e.compilerExtensions||(e.compilerExtensions=[]), +e.contains&&e.contains.includes("self"))throw Error("ERR: contains `self` is not supported at the top-level of a language. 
See documentation.") +;return e.classNameAliases=a(e.classNameAliases||{}),function t(r,s){const o=r +;if(r.isCompiled)return o +;[R,L,W,$].forEach((e=>e(r,s))),e.compilerExtensions.forEach((e=>e(r,s))), +r.__beforeBegin=null,[I,D,B].forEach((e=>e(r,s))),r.isCompiled=!0;let l=null +;return"object"==typeof r.keywords&&r.keywords.$pattern&&(r.keywords=Object.assign({},r.keywords), +l=r.keywords.$pattern, +delete r.keywords.$pattern),l=l||/\w+/,r.keywords&&(r.keywords=U(r.keywords,e.case_insensitive)), +o.keywordPatternRe=n(l,!0), +s&&(r.begin||(r.begin=/\B|\b/),o.beginRe=n(o.begin),r.end||r.endsWithParent||(r.end=/\B|\b/), +r.end&&(o.endRe=n(o.end)), +o.terminatorEnd=c(o.end)||"",r.endsWithParent&&s.terminatorEnd&&(o.terminatorEnd+=(r.end?"|":"")+s.terminatorEnd)), +r.illegal&&(o.illegalRe=n(r.illegal)), +r.contains||(r.contains=[]),r.contains=[].concat(...r.contains.map((e=>(e=>(e.variants&&!e.cachedVariants&&(e.cachedVariants=e.variants.map((n=>a(e,{ +variants:null},n)))),e.cachedVariants?e.cachedVariants:X(e)?a(e,{ +starts:e.starts?a(e.starts):null +}):Object.isFrozen(e)?a(e):e))("self"===e?r:e)))),r.contains.forEach((e=>{t(e,o) +})),r.starts&&t(r.starts,s),o.matcher=(e=>{const n=new i +;return e.contains.forEach((e=>n.addRule(e.begin,{rule:e,type:"begin" +}))),e.terminatorEnd&&n.addRule(e.terminatorEnd,{type:"end" +}),e.illegal&&n.addRule(e.illegal,{type:"illegal"}),n})(o),o}(e)}function X(e){ +return!!e&&(e.endsWithParent||X(e.starts))}class V extends Error{ +constructor(e,n){super(e),this.name="HTMLInjectionError",this.html=n}} +const J=t,Y=a,ee=Symbol("nomatch"),ne=t=>{ +const a=Object.create(null),i=Object.create(null),r=[];let s=!0 +;const o="Could not find the language '{}', did you forget to load/include a language module?",c={ +disableAutodetect:!0,name:"Plain text",contains:[]};let p={ +ignoreUnescapedHTML:!1,throwUnescapedHTML:!1,noHighlightRe:/^(no-?highlight)$/i, +languageDetectRe:/\blang(?:uage)?-([\w-]+)\b/i,classPrefix:"hljs-", +cssSelector:"pre code",languages:null,__emitter:l};function _(e){ +return p.noHighlightRe.test(e)}function h(e,n,t){let a="",i="" +;"object"==typeof n?(a=e, +t=n.ignoreIllegals,i=n.language):(q("10.7.0","highlight(lang, code, ...args) has been deprecated."), +q("10.7.0","Please use highlight(code, options) instead.\nhttps://github.com/highlightjs/highlight.js/issues/2277"), +i=e,a=n),void 0===t&&(t=!0);const r={code:a,language:i};k("before:highlight",r) +;const s=r.result?r.result:f(r.language,r.code,t) +;return s.code=r.code,k("after:highlight",s),s}function f(e,t,i,r){ +const l=Object.create(null);function c(){if(!k.keywords)return void S.addText(A) +;let e=0;k.keywordPatternRe.lastIndex=0;let n=k.keywordPatternRe.exec(A),t="" +;for(;n;){t+=A.substring(e,n.index) +;const i=w.case_insensitive?n[0].toLowerCase():n[0],r=(a=i,k.keywords[a]);if(r){ +const[e,a]=r +;if(S.addText(t),t="",l[i]=(l[i]||0)+1,l[i]<=7&&(C+=a),e.startsWith("_"))t+=n[0];else{ +const t=w.classNameAliases[e]||e;g(n[0],t)}}else t+=n[0] +;e=k.keywordPatternRe.lastIndex,n=k.keywordPatternRe.exec(A)}var a +;t+=A.substring(e),S.addText(t)}function d(){null!=k.subLanguage?(()=>{ +if(""===A)return;let e=null;if("string"==typeof k.subLanguage){ +if(!a[k.subLanguage])return void S.addText(A) +;e=f(k.subLanguage,A,!0,M[k.subLanguage]),M[k.subLanguage]=e._top +}else e=E(A,k.subLanguage.length?k.subLanguage:null) +;k.relevance>0&&(C+=e.relevance),S.__addSublanguage(e._emitter,e.language) +})():c(),A=""}function g(e,n){ +""!==e&&(S.startScope(n),S.addText(e),S.endScope())}function u(e,n){let 
t=1 +;const a=n.length-1;for(;t<=a;){if(!e._emit[t]){t++;continue} +const a=w.classNameAliases[e[t]]||e[t],i=n[t];a?g(i,a):(A=i,c(),A=""),t++}} +function b(e,n){ +return e.scope&&"string"==typeof e.scope&&S.openNode(w.classNameAliases[e.scope]||e.scope), +e.beginScope&&(e.beginScope._wrap?(g(A,w.classNameAliases[e.beginScope._wrap]||e.beginScope._wrap), +A=""):e.beginScope._multi&&(u(e.beginScope,n),A="")),k=Object.create(e,{parent:{ +value:k}}),k}function m(e,t,a){let i=((e,n)=>{const t=e&&e.exec(n) +;return t&&0===t.index})(e.endRe,a);if(i){if(e["on:end"]){const a=new n(e) +;e["on:end"](t,a),a.isMatchIgnored&&(i=!1)}if(i){ +for(;e.endsParent&&e.parent;)e=e.parent;return e}} +if(e.endsWithParent)return m(e.parent,t,a)}function _(e){ +return 0===k.matcher.regexIndex?(A+=e[0],1):(I=!0,0)}function h(e){ +const n=e[0],a=t.substring(e.index),i=m(k,e,a);if(!i)return ee;const r=k +;k.endScope&&k.endScope._wrap?(d(), +g(n,k.endScope._wrap)):k.endScope&&k.endScope._multi?(d(), +u(k.endScope,e)):r.skip?A+=n:(r.returnEnd||r.excludeEnd||(A+=n), +d(),r.excludeEnd&&(A=n));do{ +k.scope&&S.closeNode(),k.skip||k.subLanguage||(C+=k.relevance),k=k.parent +}while(k!==i.parent);return i.starts&&b(i.starts,e),r.returnEnd?0:n.length} +let y={};function N(a,r){const o=r&&r[0];if(A+=a,null==o)return d(),0 +;if("begin"===y.type&&"end"===r.type&&y.index===r.index&&""===o){ +if(A+=t.slice(r.index,r.index+1),!s){const n=Error(`0 width match regex (${e})`) +;throw n.languageName=e,n.badRule=y.rule,n}return 1} +if(y=r,"begin"===r.type)return(e=>{ +const t=e[0],a=e.rule,i=new n(a),r=[a.__beforeBegin,a["on:begin"]] +;for(const n of r)if(n&&(n(e,i),i.isMatchIgnored))return _(t) +;return a.skip?A+=t:(a.excludeBegin&&(A+=t), +d(),a.returnBegin||a.excludeBegin||(A=t)),b(a,e),a.returnBegin?0:t.length})(r) +;if("illegal"===r.type&&!i){ +const e=Error('Illegal lexeme "'+o+'" for mode "'+(k.scope||"")+'"') +;throw e.mode=k,e}if("end"===r.type){const e=h(r);if(e!==ee)return e} +if("illegal"===r.type&&""===o)return 1 +;if(R>1e5&&R>3*r.index)throw Error("potential infinite loop, way more iterations than matches") +;return A+=o,o.length}const w=v(e) +;if(!w)throw K(o.replace("{}",e)),Error('Unknown language: "'+e+'"') +;const O=Q(w);let x="",k=r||O;const M={},S=new p.__emitter(p);(()=>{const e=[] +;for(let n=k;n!==w;n=n.parent)n.scope&&e.unshift(n.scope) +;e.forEach((e=>S.openNode(e)))})();let A="",C=0,T=0,R=0,I=!1;try{ +if(w.__emitTokens)w.__emitTokens(t,S);else{for(k.matcher.considerAll();;){ +R++,I?I=!1:k.matcher.considerAll(),k.matcher.lastIndex=T +;const e=k.matcher.exec(t);if(!e)break;const n=N(t.substring(T,e.index),e) +;T=e.index+n}N(t.substring(T))}return S.finalize(),x=S.toHTML(),{language:e, +value:x,relevance:C,illegal:!1,_emitter:S,_top:k}}catch(n){ +if(n.message&&n.message.includes("Illegal"))return{language:e,value:J(t), +illegal:!0,relevance:0,_illegalBy:{message:n.message,index:T, +context:t.slice(T-100,T+100),mode:n.mode,resultSoFar:x},_emitter:S};if(s)return{ +language:e,value:J(t),illegal:!1,relevance:0,errorRaised:n,_emitter:S,_top:k} +;throw n}}function E(e,n){n=n||p.languages||Object.keys(a);const t=(e=>{ +const n={value:J(e),illegal:!1,relevance:0,_top:c,_emitter:new p.__emitter(p)} +;return n._emitter.addText(e),n})(e),i=n.filter(v).filter(x).map((n=>f(n,e,!1))) +;i.unshift(t);const r=i.sort(((e,n)=>{ +if(e.relevance!==n.relevance)return n.relevance-e.relevance +;if(e.language&&n.language){if(v(e.language).supersetOf===n.language)return 1 +;if(v(n.language).supersetOf===e.language)return-1}return 
0})),[s,o]=r,l=s +;return l.secondBest=o,l}function y(e){let n=null;const t=(e=>{ +let n=e.className+" ";n+=e.parentNode?e.parentNode.className:"" +;const t=p.languageDetectRe.exec(n);if(t){const n=v(t[1]) +;return n||(H(o.replace("{}",t[1])), +H("Falling back to no-highlight mode for this block.",e)),n?t[1]:"no-highlight"} +return n.split(/\s+/).find((e=>_(e)||v(e)))})(e);if(_(t))return +;if(k("before:highlightElement",{el:e,language:t +}),e.dataset.highlighted)return void console.log("Element previously highlighted. To highlight again, first unset `dataset.highlighted`.",e) +;if(e.children.length>0&&(p.ignoreUnescapedHTML||(console.warn("One of your code blocks includes unescaped HTML. This is a potentially serious security risk."), +console.warn("https://github.com/highlightjs/highlight.js/wiki/security"), +console.warn("The element with unescaped HTML:"), +console.warn(e)),p.throwUnescapedHTML))throw new V("One of your code blocks includes unescaped HTML.",e.innerHTML) +;n=e;const a=n.textContent,r=t?h(a,{language:t,ignoreIllegals:!0}):E(a) +;e.innerHTML=r.value,e.dataset.highlighted="yes",((e,n,t)=>{const a=n&&i[n]||t +;e.classList.add("hljs"),e.classList.add("language-"+a) +})(e,t,r.language),e.result={language:r.language,re:r.relevance, +relevance:r.relevance},r.secondBest&&(e.secondBest={ +language:r.secondBest.language,relevance:r.secondBest.relevance +}),k("after:highlightElement",{el:e,result:r,text:a})}let N=!1;function w(){ +"loading"!==document.readyState?document.querySelectorAll(p.cssSelector).forEach(y):N=!0 +}function v(e){return e=(e||"").toLowerCase(),a[e]||a[i[e]]} +function O(e,{languageName:n}){"string"==typeof e&&(e=[e]),e.forEach((e=>{ +i[e.toLowerCase()]=n}))}function x(e){const n=v(e) +;return n&&!n.disableAutodetect}function k(e,n){const t=e;r.forEach((e=>{ +e[t]&&e[t](n)}))} +"undefined"!=typeof window&&window.addEventListener&&window.addEventListener("DOMContentLoaded",(()=>{ +N&&w()}),!1),Object.assign(t,{highlight:h,highlightAuto:E,highlightAll:w, +highlightElement:y, +highlightBlock:e=>(q("10.7.0","highlightBlock will be removed entirely in v12.0"), +q("10.7.0","Please use highlightElement now."),y(e)),configure:e=>{p=Y(p,e)}, +initHighlighting:()=>{ +w(),q("10.6.0","initHighlighting() deprecated. Use highlightAll() now.")}, +initHighlightingOnLoad:()=>{ +w(),q("10.6.0","initHighlightingOnLoad() deprecated. 
Use highlightAll() now.") +},registerLanguage:(e,n)=>{let i=null;try{i=n(t)}catch(n){ +if(K("Language definition for '{}' could not be registered.".replace("{}",e)), +!s)throw n;K(n),i=c} +i.name||(i.name=e),a[e]=i,i.rawDefinition=n.bind(null,t),i.aliases&&O(i.aliases,{ +languageName:e})},unregisterLanguage:e=>{delete a[e] +;for(const n of Object.keys(i))i[n]===e&&delete i[n]}, +listLanguages:()=>Object.keys(a),getLanguage:v,registerAliases:O, +autoDetection:x,inherit:Y,addPlugin:e=>{(e=>{ +e["before:highlightBlock"]&&!e["before:highlightElement"]&&(e["before:highlightElement"]=n=>{ +e["before:highlightBlock"](Object.assign({block:n.el},n)) +}),e["after:highlightBlock"]&&!e["after:highlightElement"]&&(e["after:highlightElement"]=n=>{ +e["after:highlightBlock"](Object.assign({block:n.el},n))})})(e),r.push(e)}, +removePlugin:e=>{const n=r.indexOf(e);-1!==n&&r.splice(n,1)}}),t.debugMode=()=>{ +s=!1},t.safeMode=()=>{s=!0},t.versionString="11.9.0",t.regex={concat:b, +lookahead:d,either:m,optional:u,anyNumberOfTimes:g} +;for(const n in C)"object"==typeof C[n]&&e(C[n]);return Object.assign(t,C),t +},te=ne({});te.newInstance=()=>ne({});var ae=te;const ie=e=>({IMPORTANT:{ +scope:"meta",begin:"!important"},BLOCK_COMMENT:e.C_BLOCK_COMMENT_MODE,HEXCOLOR:{ +scope:"number",begin:/#(([0-9a-fA-F]{3,4})|(([0-9a-fA-F]{2}){3,4}))\b/}, +FUNCTION_DISPATCH:{className:"built_in",begin:/[\w-]+(?=\()/}, +ATTRIBUTE_SELECTOR_MODE:{scope:"selector-attr",begin:/\[/,end:/\]/,illegal:"$", +contains:[e.APOS_STRING_MODE,e.QUOTE_STRING_MODE]},CSS_NUMBER_MODE:{ +scope:"number", +begin:e.NUMBER_RE+"(%|em|ex|ch|rem|vw|vh|vmin|vmax|cm|mm|in|pt|pc|px|deg|grad|rad|turn|s|ms|Hz|kHz|dpi|dpcm|dppx)?", +relevance:0},CSS_VARIABLE:{className:"attr",begin:/--[A-Za-z_][A-Za-z0-9_-]*/} +}),re=["a","abbr","address","article","aside","audio","b","blockquote","body","button","canvas","caption","cite","code","dd","del","details","dfn","div","dl","dt","em","fieldset","figcaption","figure","footer","form","h1","h2","h3","h4","h5","h6","header","hgroup","html","i","iframe","img","input","ins","kbd","label","legend","li","main","mark","menu","nav","object","ol","p","q","quote","samp","section","span","strong","summary","sup","table","tbody","td","textarea","tfoot","th","thead","time","tr","ul","var","video"],se=["any-hover","any-pointer","aspect-ratio","color","color-gamut","color-index","device-aspect-ratio","device-height","device-width","display-mode","forced-colors","grid","height","hover","inverted-colors","monochrome","orientation","overflow-block","overflow-inline","pointer","prefers-color-scheme","prefers-contrast","prefers-reduced-motion","prefers-reduced-transparency","resolution","scan","scripting","update","width","min-width","max-width","min-height","max-height"],oe=["active","any-link","blank","checked","current","default","defined","dir","disabled","drop","empty","enabled","first","first-child","first-of-type","fullscreen","future","focus","focus-visible","focus-within","has","host","host-context","hover","indeterminate","in-range","invalid","is","lang","last-child","last-of-type","left","link","local-link","not","nth-child","nth-col","nth-last-child","nth-last-col","nth-last-of-type","nth-of-type","only-child","only-of-type","optional","out-of-range","past","placeholder-shown","read-only","read-write","required","right","root","scope","target","target-within","user-invalid","valid","visited","where"],le=["after","backdrop","before","cue","cue-region","first-letter","first-line","grammar-error","marker","part","placeholder","select
ion","slotted","spelling-error"],ce=["align-content","align-items","align-self","all","animation","animation-delay","animation-direction","animation-duration","animation-fill-mode","animation-iteration-count","animation-name","animation-play-state","animation-timing-function","backface-visibility","background","background-attachment","background-blend-mode","background-clip","background-color","background-image","background-origin","background-position","background-repeat","background-size","block-size","border","border-block","border-block-color","border-block-end","border-block-end-color","border-block-end-style","border-block-end-width","border-block-start","border-block-start-color","border-block-start-style","border-block-start-width","border-block-style","border-block-width","border-bottom","border-bottom-color","border-bottom-left-radius","border-bottom-right-radius","border-bottom-style","border-bottom-width","border-collapse","border-color","border-image","border-image-outset","border-image-repeat","border-image-slice","border-image-source","border-image-width","border-inline","border-inline-color","border-inline-end","border-inline-end-color","border-inline-end-style","border-inline-end-width","border-inline-start","border-inline-start-color","border-inline-start-style","border-inline-start-width","border-inline-style","border-inline-width","border-left","border-left-color","border-left-style","border-left-width","border-radius","border-right","border-right-color","border-right-style","border-right-width","border-spacing","border-style","border-top","border-top-color","border-top-left-radius","border-top-right-radius","border-top-style","border-top-width","border-width","bottom","box-decoration-break","box-shadow","box-sizing","break-after","break-before","break-inside","caption-side","caret-color","clear","clip","clip-path","clip-rule","color","column-count","column-fill","column-gap","column-rule","column-rule-color","column-rule-style","column-rule-width","column-span","column-width","columns","contain","content","content-visibility","counter-increment","counter-reset","cue","cue-after","cue-before","cursor","direction","display","empty-cells","filter","flex","flex-basis","flex-direction","flex-flow","flex-grow","flex-shrink","flex-wrap","float","flow","font","font-display","font-family","font-feature-settings","font-kerning","font-language-override","font-size","font-size-adjust","font-smoothing","font-stretch","font-style","font-synthesis","font-variant","font-variant-caps","font-variant-east-asian","font-variant-ligatures","font-variant-numeric","font-variant-position","font-variation-settings","font-weight","gap","glyph-orientation-vertical","grid","grid-area","grid-auto-columns","grid-auto-flow","grid-auto-rows","grid-column","grid-column-end","grid-column-start","grid-gap","grid-row","grid-row-end","grid-row-start","grid-template","grid-template-areas","grid-template-columns","grid-template-rows","hanging-punctuation","height","hyphens","icon","image-orientation","image-rendering","image-resolution","ime-mode","inline-size","isolation","justify-content","left","letter-spacing","line-break","line-height","list-style","list-style-image","list-style-position","list-style-type","margin","margin-block","margin-block-end","margin-block-start","margin-bottom","margin-inline","margin-inline-end","margin-inline-start","margin-left","margin-right","margin-top","marks","mask","mask-border","mask-border-mode","mask-border-outset","mask-border-repeat","mask-border-slice","mask-border-
source","mask-border-width","mask-clip","mask-composite","mask-image","mask-mode","mask-origin","mask-position","mask-repeat","mask-size","mask-type","max-block-size","max-height","max-inline-size","max-width","min-block-size","min-height","min-inline-size","min-width","mix-blend-mode","nav-down","nav-index","nav-left","nav-right","nav-up","none","normal","object-fit","object-position","opacity","order","orphans","outline","outline-color","outline-offset","outline-style","outline-width","overflow","overflow-wrap","overflow-x","overflow-y","padding","padding-block","padding-block-end","padding-block-start","padding-bottom","padding-inline","padding-inline-end","padding-inline-start","padding-left","padding-right","padding-top","page-break-after","page-break-before","page-break-inside","pause","pause-after","pause-before","perspective","perspective-origin","pointer-events","position","quotes","resize","rest","rest-after","rest-before","right","row-gap","scroll-margin","scroll-margin-block","scroll-margin-block-end","scroll-margin-block-start","scroll-margin-bottom","scroll-margin-inline","scroll-margin-inline-end","scroll-margin-inline-start","scroll-margin-left","scroll-margin-right","scroll-margin-top","scroll-padding","scroll-padding-block","scroll-padding-block-end","scroll-padding-block-start","scroll-padding-bottom","scroll-padding-inline","scroll-padding-inline-end","scroll-padding-inline-start","scroll-padding-left","scroll-padding-right","scroll-padding-top","scroll-snap-align","scroll-snap-stop","scroll-snap-type","scrollbar-color","scrollbar-gutter","scrollbar-width","shape-image-threshold","shape-margin","shape-outside","speak","speak-as","src","tab-size","table-layout","text-align","text-align-all","text-align-last","text-combine-upright","text-decoration","text-decoration-color","text-decoration-line","text-decoration-style","text-emphasis","text-emphasis-color","text-emphasis-position","text-emphasis-style","text-indent","text-justify","text-orientation","text-overflow","text-rendering","text-shadow","text-transform","text-underline-position","top","transform","transform-box","transform-origin","transform-style","transition","transition-delay","transition-duration","transition-property","transition-timing-function","unicode-bidi","vertical-align","visibility","voice-balance","voice-duration","voice-family","voice-pitch","voice-range","voice-rate","voice-stress","voice-volume","white-space","widows","width","will-change","word-break","word-spacing","word-wrap","writing-mode","z-index"].reverse(),de=oe.concat(le) +;var ge="[0-9](_*[0-9])*",ue=`\\.(${ge})`,be="[0-9a-fA-F](_*[0-9a-fA-F])*",me={ +className:"number",variants:[{ +begin:`(\\b(${ge})((${ue})|\\.)?|(${ue}))[eE][+-]?(${ge})[fFdD]?\\b`},{ +begin:`\\b(${ge})((${ue})[fFdD]?\\b|\\.([fFdD]\\b)?)`},{ +begin:`(${ue})[fFdD]?\\b`},{begin:`\\b(${ge})[fFdD]\\b`},{ +begin:`\\b0[xX]((${be})\\.?|(${be})?\\.(${be}))[pP][+-]?(${ge})[fFdD]?\\b`},{ +begin:"\\b(0|[1-9](_*[0-9])*)[lL]?\\b"},{begin:`\\b0[xX](${be})[lL]?\\b`},{ +begin:"\\b0(_*[0-7])*[lL]?\\b"},{begin:"\\b0[bB][01](_*[01])*[lL]?\\b"}], +relevance:0};function pe(e,n,t){return-1===t?"":e.replace(n,(a=>pe(e,n,t-1)))} +const 
_e="[A-Za-z$_][0-9A-Za-z$_]*",he=["as","in","of","if","for","while","finally","var","new","function","do","return","void","else","break","catch","instanceof","with","throw","case","default","try","switch","continue","typeof","delete","let","yield","const","class","debugger","async","await","static","import","from","export","extends"],fe=["true","false","null","undefined","NaN","Infinity"],Ee=["Object","Function","Boolean","Symbol","Math","Date","Number","BigInt","String","RegExp","Array","Float32Array","Float64Array","Int8Array","Uint8Array","Uint8ClampedArray","Int16Array","Int32Array","Uint16Array","Uint32Array","BigInt64Array","BigUint64Array","Set","Map","WeakSet","WeakMap","ArrayBuffer","SharedArrayBuffer","Atomics","DataView","JSON","Promise","Generator","GeneratorFunction","AsyncFunction","Reflect","Proxy","Intl","WebAssembly"],ye=["Error","EvalError","InternalError","RangeError","ReferenceError","SyntaxError","TypeError","URIError"],Ne=["setInterval","setTimeout","clearInterval","clearTimeout","require","exports","eval","isFinite","isNaN","parseFloat","parseInt","decodeURI","decodeURIComponent","encodeURI","encodeURIComponent","escape","unescape"],we=["arguments","this","super","console","window","document","localStorage","sessionStorage","module","global"],ve=[].concat(Ne,Ee,ye) +;function Oe(e){const n=e.regex,t=_e,a={begin:/<[A-Za-z0-9\\._:-]+/, +end:/\/[A-Za-z0-9\\._:-]+>|\/>/,isTrulyOpeningTag:(e,n)=>{ +const t=e[0].length+e.index,a=e.input[t] +;if("<"===a||","===a)return void n.ignoreMatch();let i +;">"===a&&(((e,{after:n})=>{const t="",M={ +match:[/const|var|let/,/\s+/,t,/\s*/,/=\s*/,/(async\s*)?/,n.lookahead(k)], +keywords:"async",className:{1:"keyword",3:"title.function"},contains:[f]} +;return{name:"JavaScript",aliases:["js","jsx","mjs","cjs"],keywords:i,exports:{ +PARAMS_CONTAINS:h,CLASS_REFERENCE:y},illegal:/#(?![$_A-z])/, +contains:[e.SHEBANG({label:"shebang",binary:"node",relevance:5}),{ +label:"use_strict",className:"meta",relevance:10, +begin:/^\s*['"]use (strict|asm)['"]/ +},e.APOS_STRING_MODE,e.QUOTE_STRING_MODE,d,g,u,b,m,{match:/\$\d+/},l,y,{ +className:"attr",begin:t+n.lookahead(":"),relevance:0},M,{ +begin:"("+e.RE_STARTERS_RE+"|\\b(case|return|throw)\\b)\\s*", +keywords:"return throw case",relevance:0,contains:[m,e.REGEXP_MODE,{ +className:"function",begin:k,returnBegin:!0,end:"\\s*=>",contains:[{ +className:"params",variants:[{begin:e.UNDERSCORE_IDENT_RE,relevance:0},{ +className:null,begin:/\(\s*\)/,skip:!0},{begin:/\(/,end:/\)/,excludeBegin:!0, +excludeEnd:!0,keywords:i,contains:h}]}]},{begin:/,/,relevance:0},{match:/\s+/, +relevance:0},{variants:[{begin:"<>",end:""},{ +match:/<[A-Za-z0-9\\._:-]+\s*\/>/},{begin:a.begin, +"on:begin":a.isTrulyOpeningTag,end:a.end}],subLanguage:"xml",contains:[{ +begin:a.begin,end:a.end,skip:!0,contains:["self"]}]}]},N,{ +beginKeywords:"while if switch catch for"},{ +begin:"\\b(?!function)"+e.UNDERSCORE_IDENT_RE+"\\([^()]*(\\([^()]*(\\([^()]*\\)[^()]*)*\\)[^()]*)*\\)\\s*\\{", +returnBegin:!0,label:"func.def",contains:[f,e.inherit(e.TITLE_MODE,{begin:t, +className:"title.function"})]},{match:/\.\.\./,relevance:0},O,{match:"\\$"+t, +relevance:0},{match:[/\bconstructor(?=\s*\()/],className:{1:"title.function"}, +contains:[f]},w,{relevance:0,match:/\b[A-Z][A-Z_0-9]+\b/, +className:"variable.constant"},E,x,{match:/\$[(.]/}]}}function xe(e){const n={ +keyword:"base|10 prefix|10 @base|10 @prefix|10",literal:"true|0 false|0", +built_in:"a|0"},t={className:"literal",relevance:1,begin://, +illegal:/[^\x00-\x20<>"{}|^`]/ 
+},a="A-Za-z\xc0-\xd6\xd8-\xf6\xf8-\u02ff\u0370-\u037d\u037f-\u1fff\u200c-\u200d\u2070-\u218f\u2c00-\u2fef\u3001-\ud7ff\uf900-\ufdcf\ufdf0-\ufffd\u10000-\uefffF",i=a+"_",r="-"+i+"0-9\xb7\u0300-\u036f\u203f-\u2040",s="%[0-9A-Fa-f][0-9A-Fa-f]|\\\\[_~.!$&'()*+,;=/?#@%-]",o="(["+a+"](["+r+".]*["+r+"])?)?:",l="(["+i+":0-9]|"+s+")(["+r+".:]|"+s+")*(["+r+":]|"+s+")?",c={ +begin:o+"("+l+")?",relevance:0,className:"symbol"},d={ +begin:"_:["+i+"0-9](["+r+".]*["+r+"])?",relevance:10, +className:"template-variable"},g={begin:/@[a-zA-Z]+([a-zA-Z0-9-]+)*/, +className:"type",relevance:5},u={begin:"\\^\\^"+(o+l),className:"type", +relevance:10},b={begin:/'''/,end:/'''/,className:"string",relevance:0},m={ +begin:/"""/,end:/"""/,className:"string",relevance:0 +},p=JSON.parse(JSON.stringify(e.APOS_STRING_MODE));p.relevance=0 +;const _=JSON.parse(JSON.stringify(e.QUOTE_STRING_MODE));_.relevance=0 +;const h=JSON.parse(JSON.stringify(e.C_NUMBER_MODE));return h.relevance=0,{ +case_insensitive:!0,keywords:n,name:"Turtle", +aliases:["turtle","ttl","n3","ntriples"], +contains:[g,u,t,d,c,b,m,p,_,h,e.HASH_COMMENT_MODE],exports:{LANGTAG:g, +DATATYPE:u,IRI_LITERAL:t,BLANK_NODE:d,PNAME:c,TRIPLE_APOS_STRING:b, +TRIPLE_QUOTE_STRING:m,APOS_STRING_LITERAL:p,QUOTE_STRING_LITERAL:_,NUMBER:h, +KEYWORDS:n}}} +const ke=e=>b(/\b/,e,/\w$/.test(e)?/\b/:/\B/),Me=["Protocol","Type"].map(ke),Se=["init","self"].map(ke),Ae=["Any","Self"],Ce=["actor","any","associatedtype","async","await",/as\?/,/as!/,"as","borrowing","break","case","catch","class","consume","consuming","continue","convenience","copy","default","defer","deinit","didSet","distributed","do","dynamic","each","else","enum","extension","fallthrough",/fileprivate\(set\)/,"fileprivate","final","for","func","get","guard","if","import","indirect","infix",/init\?/,/init!/,"inout",/internal\(set\)/,"internal","in","is","isolated","nonisolated","lazy","let","macro","mutating","nonmutating",/open\(set\)/,"open","operator","optional","override","postfix","precedencegroup","prefix",/private\(set\)/,"private","protocol",/public\(set\)/,"public","repeat","required","rethrows","return","set","some","static","struct","subscript","super","switch","throws","throw",/try\?/,/try!/,"try","typealias",/unowned\(safe\)/,/unowned\(unsafe\)/,"unowned","var","weak","where","while","willSet"],Te=["false","nil","true"],Re=["assignment","associativity","higherThan","left","lowerThan","none","right"],Ie=["#colorLiteral","#column","#dsohandle","#else","#elseif","#endif","#error","#file","#fileID","#fileLiteral","#filePath","#function","#if","#imageLiteral","#keyPath","#line","#selector","#sourceLocation","#warning"],De=["abs","all","any","assert","assertionFailure","debugPrint","dump","fatalError","getVaList","isKnownUniquelyReferenced","max","min","numericCast","pointwiseMax","pointwiseMin","precondition","preconditionFailure","print","readLine","repeatElement","sequence","stride","swap","swift_unboxFromSwiftValueWithType","transcode","type","unsafeBitCast","unsafeDowncast","withExtendedLifetime","withUnsafeMutablePointer","withUnsafePointer","withVaList","withoutActuallyEscaping","zip"],Le=m(/[/=\-+!*%<>&|^~?]/,/[\u00A1-\u00A7]/,/[\u00A9\u00AB]/,/[\u00AC\u00AE]/,/[\u00B0\u00B1]/,/[\u00B6\u00BB\u00BF\u00D7\u00F7]/,/[\u2016-\u2017]/,/[\u2020-\u2027]/,/[\u2030-\u203E]/,/[\u2041-\u2053]/,/[\u2055-\u205E]/,/[\u2190-\u23FF]/,/[\u2500-\u2775]/,/[\u2794-\u2BFF]/,/[\u2E00-\u2E7F]/,/[\u3001-\u3003]/,/[\u3008-\u3020]/,/[\u3030]/),Be=m(Le,/[\u0300-\u036F]/,/[\u1DC0-\u1DFF]/,/[\u20D0-\u20FF]/,/[\uFE00-\uFE0F]/,/[\uFE
20-\uFE2F]/),$e=b(Le,Be,"*"),ze=m(/[a-zA-Z_]/,/[\u00A8\u00AA\u00AD\u00AF\u00B2-\u00B5\u00B7-\u00BA]/,/[\u00BC-\u00BE\u00C0-\u00D6\u00D8-\u00F6\u00F8-\u00FF]/,/[\u0100-\u02FF\u0370-\u167F\u1681-\u180D\u180F-\u1DBF]/,/[\u1E00-\u1FFF]/,/[\u200B-\u200D\u202A-\u202E\u203F-\u2040\u2054\u2060-\u206F]/,/[\u2070-\u20CF\u2100-\u218F\u2460-\u24FF\u2776-\u2793]/,/[\u2C00-\u2DFF\u2E80-\u2FFF]/,/[\u3004-\u3007\u3021-\u302F\u3031-\u303F\u3040-\uD7FF]/,/[\uF900-\uFD3D\uFD40-\uFDCF\uFDF0-\uFE1F\uFE30-\uFE44]/,/[\uFE47-\uFEFE\uFF00-\uFFFD]/),Fe=m(ze,/\d/,/[\u0300-\u036F\u1DC0-\u1DFF\u20D0-\u20FF\uFE20-\uFE2F]/),Ue=b(ze,Fe,"*"),Pe=b(/[A-Z]/,Fe,"*"),je=["attached","autoclosure",b(/convention\(/,m("swift","block","c"),/\)/),"discardableResult","dynamicCallable","dynamicMemberLookup","escaping","freestanding","frozen","GKInspectable","IBAction","IBDesignable","IBInspectable","IBOutlet","IBSegueAction","inlinable","main","nonobjc","NSApplicationMain","NSCopying","NSManaged",b(/objc\(/,Ue,/\)/),"objc","objcMembers","propertyWrapper","requires_stored_property_inits","resultBuilder","Sendable","testable","UIApplicationMain","unchecked","unknown","usableFromInline","warn_unqualified_access"],Ke=["iOS","iOSApplicationExtension","macOS","macOSApplicationExtension","macCatalyst","macCatalystApplicationExtension","watchOS","watchOSApplicationExtension","tvOS","tvOSApplicationExtension","swift"] +;var He=Object.freeze({__proto__:null,grmr_bash:e=>{const n=e.regex,t={},a={ +begin:/\$\{/,end:/\}/,contains:["self",{begin:/:-/,contains:[t]}]} +;Object.assign(t,{className:"variable",variants:[{ +begin:n.concat(/\$[\w\d#@][\w\d_]*/,"(?![\\w\\d])(?![$])")},a]});const i={ +className:"subst",begin:/\$\(/,end:/\)/,contains:[e.BACKSLASH_ESCAPE]},r={ +begin:/<<-?\s*(?=\w+)/,starts:{contains:[e.END_SAME_AS_BEGIN({begin:/(\w+)/, +end:/(\w+)/,className:"string"})]}},s={className:"string",begin:/"/,end:/"/, +contains:[e.BACKSLASH_ESCAPE,t,i]};i.contains.push(s);const o={begin:/\$?\(\(/, +end:/\)\)/,contains:[{begin:/\d+#[0-9a-f]+/,className:"number"},e.NUMBER_MODE,t] +},l=e.SHEBANG({binary:"(fish|bash|zsh|sh|csh|ksh|tcsh|dash|scsh)",relevance:10 +}),c={className:"function",begin:/\w[\w\d_]*\s*\(\s*\)\s*\{/,returnBegin:!0, +contains:[e.inherit(e.TITLE_MODE,{begin:/\w[\w\d_]*/})],relevance:0};return{ +name:"Bash",aliases:["sh"],keywords:{$pattern:/\b[a-z][a-z0-9._-]+\b/, +keyword:["if","then","else","elif","fi","for","while","until","in","do","done","case","esac","function","select"], +literal:["true","false"], 
+built_in:["break","cd","continue","eval","exec","exit","export","getopts","hash","pwd","readonly","return","shift","test","times","trap","umask","unset","alias","bind","builtin","caller","command","declare","echo","enable","help","let","local","logout","mapfile","printf","read","readarray","source","type","typeset","ulimit","unalias","set","shopt","autoload","bg","bindkey","bye","cap","chdir","clone","comparguments","compcall","compctl","compdescribe","compfiles","compgroups","compquote","comptags","comptry","compvalues","dirs","disable","disown","echotc","echoti","emulate","fc","fg","float","functions","getcap","getln","history","integer","jobs","kill","limit","log","noglob","popd","print","pushd","pushln","rehash","sched","setcap","setopt","stat","suspend","ttyctl","unfunction","unhash","unlimit","unsetopt","vared","wait","whence","where","which","zcompile","zformat","zftp","zle","zmodload","zparseopts","zprof","zpty","zregexparse","zsocket","zstyle","ztcp","chcon","chgrp","chown","chmod","cp","dd","df","dir","dircolors","ln","ls","mkdir","mkfifo","mknod","mktemp","mv","realpath","rm","rmdir","shred","sync","touch","truncate","vdir","b2sum","base32","base64","cat","cksum","comm","csplit","cut","expand","fmt","fold","head","join","md5sum","nl","numfmt","od","paste","ptx","pr","sha1sum","sha224sum","sha256sum","sha384sum","sha512sum","shuf","sort","split","sum","tac","tail","tr","tsort","unexpand","uniq","wc","arch","basename","chroot","date","dirname","du","echo","env","expr","factor","groups","hostid","id","link","logname","nice","nohup","nproc","pathchk","pinky","printenv","printf","pwd","readlink","runcon","seq","sleep","stat","stdbuf","stty","tee","test","timeout","tty","uname","unlink","uptime","users","who","whoami","yes"] +},contains:[l,e.SHEBANG(),c,o,e.HASH_COMMENT_MODE,r,{match:/(\/[a-z._-]+)+/},s,{ +match:/\\"/},{className:"string",begin:/'/,end:/'/},{match:/\\'/},t]}}, +grmr_c:e=>{const n=e.regex,t=e.COMMENT("//","$",{contains:[{begin:/\\\n/}] +}),a="decltype\\(auto\\)",i="[a-zA-Z_]\\w*::",r="("+a+"|"+n.optional(i)+"[a-zA-Z_]\\w*"+n.optional("<[^<>]+>")+")",s={ +className:"type",variants:[{begin:"\\b[a-z\\d_]*_t\\b"},{ +match:/\batomic_[a-z]{3,6}\b/}]},o={className:"string",variants:[{ +begin:'(u8?|U|L)?"',end:'"',illegal:"\\n",contains:[e.BACKSLASH_ESCAPE]},{ +begin:"(u8?|U|L)?'(\\\\(x[0-9A-Fa-f]{2}|u[0-9A-Fa-f]{4,8}|[0-7]{3}|\\S)|.)", +end:"'",illegal:"."},e.END_SAME_AS_BEGIN({ +begin:/(?:u8?|U|L)?R"([^()\\ ]{0,16})\(/,end:/\)([^()\\ ]{0,16})"/})]},l={ +className:"number",variants:[{begin:"\\b(0b[01']+)"},{ +begin:"(-?)\\b([\\d']+(\\.[\\d']*)?|\\.[\\d']+)((ll|LL|l|L)(u|U)?|(u|U)(ll|LL|l|L)?|f|F|b|B)" +},{ +begin:"(-?)(\\b0[xX][a-fA-F0-9']+|(\\b[\\d']+(\\.[\\d']*)?|\\.[\\d']+)([eE][-+]?[\\d']+)?)" +}],relevance:0},c={className:"meta",begin:/#\s*[a-z]+\b/,end:/$/,keywords:{ +keyword:"if else elif endif define undef warning error line pragma _Pragma ifdef ifndef include" +},contains:[{begin:/\\\n/,relevance:0},e.inherit(o,{className:"string"}),{ +className:"string",begin:/<.*?>/},t,e.C_BLOCK_COMMENT_MODE]},d={ +className:"title",begin:n.optional(i)+e.IDENT_RE,relevance:0 +},g=n.optional(i)+e.IDENT_RE+"\\s*\\(",u={ 
+keyword:["asm","auto","break","case","continue","default","do","else","enum","extern","for","fortran","goto","if","inline","register","restrict","return","sizeof","struct","switch","typedef","union","volatile","while","_Alignas","_Alignof","_Atomic","_Generic","_Noreturn","_Static_assert","_Thread_local","alignas","alignof","noreturn","static_assert","thread_local","_Pragma"], +type:["float","double","signed","unsigned","int","short","long","char","void","_Bool","_Complex","_Imaginary","_Decimal32","_Decimal64","_Decimal128","const","static","complex","bool","imaginary"], +literal:"true false NULL", +built_in:"std string wstring cin cout cerr clog stdin stdout stderr stringstream istringstream ostringstream auto_ptr deque list queue stack vector map set pair bitset multiset multimap unordered_set unordered_map unordered_multiset unordered_multimap priority_queue make_pair array shared_ptr abort terminate abs acos asin atan2 atan calloc ceil cosh cos exit exp fabs floor fmod fprintf fputs free frexp fscanf future isalnum isalpha iscntrl isdigit isgraph islower isprint ispunct isspace isupper isxdigit tolower toupper labs ldexp log10 log malloc realloc memchr memcmp memcpy memset modf pow printf putchar puts scanf sinh sin snprintf sprintf sqrt sscanf strcat strchr strcmp strcpy strcspn strlen strncat strncmp strncpy strpbrk strrchr strspn strstr tanh tan vfprintf vprintf vsprintf endl initializer_list unique_ptr" +},b=[c,s,t,e.C_BLOCK_COMMENT_MODE,l,o],m={variants:[{begin:/=/,end:/;/},{ +begin:/\(/,end:/\)/},{beginKeywords:"new throw return else",end:/;/}], +keywords:u,contains:b.concat([{begin:/\(/,end:/\)/,keywords:u, +contains:b.concat(["self"]),relevance:0}]),relevance:0},p={ +begin:"("+r+"[\\*&\\s]+)+"+g,returnBegin:!0,end:/[{;=]/,excludeEnd:!0, +keywords:u,illegal:/[^\w\s\*&:<>.]/,contains:[{begin:a,keywords:u,relevance:0},{ +begin:g,returnBegin:!0,contains:[e.inherit(d,{className:"title.function"})], +relevance:0},{relevance:0,match:/,/},{className:"params",begin:/\(/,end:/\)/, +keywords:u,relevance:0,contains:[t,e.C_BLOCK_COMMENT_MODE,o,l,s,{begin:/\(/, +end:/\)/,keywords:u,relevance:0,contains:["self",t,e.C_BLOCK_COMMENT_MODE,o,l,s] +}]},s,t,e.C_BLOCK_COMMENT_MODE,c]};return{name:"C",aliases:["h"],keywords:u, +disableAutodetect:!0,illegal:"=]/,contains:[{ +beginKeywords:"final class struct"},e.TITLE_MODE]}]),exports:{preprocessor:c, +strings:o,keywords:u}}},grmr_cpp:e=>{const n=e.regex,t=e.COMMENT("//","$",{ +contains:[{begin:/\\\n/}] +}),a="decltype\\(auto\\)",i="[a-zA-Z_]\\w*::",r="(?!struct)("+a+"|"+n.optional(i)+"[a-zA-Z_]\\w*"+n.optional("<[^<>]+>")+")",s={ +className:"type",begin:"\\b[a-z\\d_]*_t\\b"},o={className:"string",variants:[{ +begin:'(u8?|U|L)?"',end:'"',illegal:"\\n",contains:[e.BACKSLASH_ESCAPE]},{ +begin:"(u8?|U|L)?'(\\\\(x[0-9A-Fa-f]{2}|u[0-9A-Fa-f]{4,8}|[0-7]{3}|\\S)|.)", +end:"'",illegal:"."},e.END_SAME_AS_BEGIN({ +begin:/(?:u8?|U|L)?R"([^()\\ ]{0,16})\(/,end:/\)([^()\\ ]{0,16})"/})]},l={ +className:"number",variants:[{begin:"\\b(0b[01']+)"},{ +begin:"(-?)\\b([\\d']+(\\.[\\d']*)?|\\.[\\d']+)((ll|LL|l|L)(u|U)?|(u|U)(ll|LL|l|L)?|f|F|b|B)" +},{ +begin:"(-?)(\\b0[xX][a-fA-F0-9']+|(\\b[\\d']+(\\.[\\d']*)?|\\.[\\d']+)([eE][-+]?[\\d']+)?)" +}],relevance:0},c={className:"meta",begin:/#\s*[a-z]+\b/,end:/$/,keywords:{ +keyword:"if else elif endif define undef warning error line pragma _Pragma ifdef ifndef include" +},contains:[{begin:/\\\n/,relevance:0},e.inherit(o,{className:"string"}),{ +className:"string",begin:/<.*?>/},t,e.C_BLOCK_COMMENT_MODE]},d={ 
+className:"title",begin:n.optional(i)+e.IDENT_RE,relevance:0 +},g=n.optional(i)+e.IDENT_RE+"\\s*\\(",u={ +type:["bool","char","char16_t","char32_t","char8_t","double","float","int","long","short","void","wchar_t","unsigned","signed","const","static"], +keyword:["alignas","alignof","and","and_eq","asm","atomic_cancel","atomic_commit","atomic_noexcept","auto","bitand","bitor","break","case","catch","class","co_await","co_return","co_yield","compl","concept","const_cast|10","consteval","constexpr","constinit","continue","decltype","default","delete","do","dynamic_cast|10","else","enum","explicit","export","extern","false","final","for","friend","goto","if","import","inline","module","mutable","namespace","new","noexcept","not","not_eq","nullptr","operator","or","or_eq","override","private","protected","public","reflexpr","register","reinterpret_cast|10","requires","return","sizeof","static_assert","static_cast|10","struct","switch","synchronized","template","this","thread_local","throw","transaction_safe","transaction_safe_dynamic","true","try","typedef","typeid","typename","union","using","virtual","volatile","while","xor","xor_eq"], +literal:["NULL","false","nullopt","nullptr","true"],built_in:["_Pragma"], +_type_hints:["any","auto_ptr","barrier","binary_semaphore","bitset","complex","condition_variable","condition_variable_any","counting_semaphore","deque","false_type","future","imaginary","initializer_list","istringstream","jthread","latch","lock_guard","multimap","multiset","mutex","optional","ostringstream","packaged_task","pair","promise","priority_queue","queue","recursive_mutex","recursive_timed_mutex","scoped_lock","set","shared_future","shared_lock","shared_mutex","shared_timed_mutex","shared_ptr","stack","string_view","stringstream","timed_mutex","thread","true_type","tuple","unique_lock","unique_ptr","unordered_map","unordered_multimap","unordered_multiset","unordered_set","variant","vector","weak_ptr","wstring","wstring_view"] +},b={className:"function.dispatch",relevance:0,keywords:{ +_hint:["abort","abs","acos","apply","as_const","asin","atan","atan2","calloc","ceil","cerr","cin","clog","cos","cosh","cout","declval","endl","exchange","exit","exp","fabs","floor","fmod","forward","fprintf","fputs","free","frexp","fscanf","future","invoke","isalnum","isalpha","iscntrl","isdigit","isgraph","islower","isprint","ispunct","isspace","isupper","isxdigit","labs","launder","ldexp","log","log10","make_pair","make_shared","make_shared_for_overwrite","make_tuple","make_unique","malloc","memchr","memcmp","memcpy","memset","modf","move","pow","printf","putchar","puts","realloc","scanf","sin","sinh","snprintf","sprintf","sqrt","sscanf","std","stderr","stdin","stdout","strcat","strchr","strcmp","strcpy","strcspn","strlen","strncat","strncmp","strncpy","strpbrk","strrchr","strspn","strstr","swap","tan","tanh","terminate","to_underlying","tolower","toupper","vfprintf","visit","vprintf","vsprintf"] +}, +begin:n.concat(/\b/,/(?!decltype)/,/(?!if)/,/(?!for)/,/(?!switch)/,/(?!while)/,e.IDENT_RE,n.lookahead(/(<[^<>]+>|)\s*\(/)) +},m=[b,c,s,t,e.C_BLOCK_COMMENT_MODE,l,o],p={variants:[{begin:/=/,end:/;/},{ +begin:/\(/,end:/\)/},{beginKeywords:"new throw return else",end:/;/}], +keywords:u,contains:m.concat([{begin:/\(/,end:/\)/,keywords:u, +contains:m.concat(["self"]),relevance:0}]),relevance:0},_={className:"function", +begin:"("+r+"[\\*&\\s]+)+"+g,returnBegin:!0,end:/[{;=]/,excludeEnd:!0, +keywords:u,illegal:/[^\w\s\*&:<>.]/,contains:[{begin:a,keywords:u,relevance:0},{ 
+begin:g,returnBegin:!0,contains:[d],relevance:0},{begin:/::/,relevance:0},{ +begin:/:/,endsWithParent:!0,contains:[o,l]},{relevance:0,match:/,/},{ +className:"params",begin:/\(/,end:/\)/,keywords:u,relevance:0, +contains:[t,e.C_BLOCK_COMMENT_MODE,o,l,s,{begin:/\(/,end:/\)/,keywords:u, +relevance:0,contains:["self",t,e.C_BLOCK_COMMENT_MODE,o,l,s]}] +},s,t,e.C_BLOCK_COMMENT_MODE,c]};return{name:"C++", +aliases:["cc","c++","h++","hpp","hh","hxx","cxx"],keywords:u,illegal:"",keywords:u,contains:["self",s]},{begin:e.IDENT_RE+"::",keywords:u},{ +match:[/\b(?:enum(?:\s+(?:class|struct))?|class|struct|union)/,/\s+/,/\w+/], +className:{1:"keyword",3:"title.class"}}])}},grmr_csharp:e=>{const n={ +keyword:["abstract","as","base","break","case","catch","class","const","continue","do","else","event","explicit","extern","finally","fixed","for","foreach","goto","if","implicit","in","interface","internal","is","lock","namespace","new","operator","out","override","params","private","protected","public","readonly","record","ref","return","scoped","sealed","sizeof","stackalloc","static","struct","switch","this","throw","try","typeof","unchecked","unsafe","using","virtual","void","volatile","while"].concat(["add","alias","and","ascending","async","await","by","descending","equals","from","get","global","group","init","into","join","let","nameof","not","notnull","on","or","orderby","partial","remove","select","set","unmanaged","value|0","var","when","where","with","yield"]), +built_in:["bool","byte","char","decimal","delegate","double","dynamic","enum","float","int","long","nint","nuint","object","sbyte","short","string","ulong","uint","ushort"], +literal:["default","false","null","true"]},t=e.inherit(e.TITLE_MODE,{ +begin:"[a-zA-Z](\\.?\\w)*"}),a={className:"number",variants:[{ +begin:"\\b(0b[01']+)"},{ +begin:"(-?)\\b([\\d']+(\\.[\\d']*)?|\\.[\\d']+)(u|U|l|L|ul|UL|f|F|b|B)"},{ +begin:"(-?)(\\b0[xX][a-fA-F0-9']+|(\\b[\\d']+(\\.[\\d']*)?|\\.[\\d']+)([eE][-+]?[\\d']+)?)" +}],relevance:0},i={className:"string",begin:'@"',end:'"',contains:[{begin:'""'}] +},r=e.inherit(i,{illegal:/\n/}),s={className:"subst",begin:/\{/,end:/\}/, +keywords:n},o=e.inherit(s,{illegal:/\n/}),l={className:"string",begin:/\$"/, +end:'"',illegal:/\n/,contains:[{begin:/\{\{/},{begin:/\}\}/ +},e.BACKSLASH_ESCAPE,o]},c={className:"string",begin:/\$@"/,end:'"',contains:[{ +begin:/\{\{/},{begin:/\}\}/},{begin:'""'},s]},d=e.inherit(c,{illegal:/\n/, +contains:[{begin:/\{\{/},{begin:/\}\}/},{begin:'""'},o]}) +;s.contains=[c,l,i,e.APOS_STRING_MODE,e.QUOTE_STRING_MODE,a,e.C_BLOCK_COMMENT_MODE], +o.contains=[d,l,r,e.APOS_STRING_MODE,e.QUOTE_STRING_MODE,a,e.inherit(e.C_BLOCK_COMMENT_MODE,{ +illegal:/\n/})];const g={variants:[c,l,i,e.APOS_STRING_MODE,e.QUOTE_STRING_MODE] +},u={begin:"<",end:">",contains:[{beginKeywords:"in out"},t] +},b=e.IDENT_RE+"(<"+e.IDENT_RE+"(\\s*,\\s*"+e.IDENT_RE+")*>)?(\\[\\])?",m={ +begin:"@"+e.IDENT_RE,relevance:0};return{name:"C#",aliases:["cs","c#"], +keywords:n,illegal:/::/,contains:[e.COMMENT("///","$",{returnBegin:!0, +contains:[{className:"doctag",variants:[{begin:"///",relevance:0},{ +begin:"\x3c!--|--\x3e"},{begin:""}]}] +}),e.C_LINE_COMMENT_MODE,e.C_BLOCK_COMMENT_MODE,{className:"meta",begin:"#", +end:"$",keywords:{ +keyword:"if else elif endif define undef warning error line region endregion pragma checksum" +}},g,a,{beginKeywords:"class interface",relevance:0,end:/[{;=]/, +illegal:/[^\s:,]/,contains:[{beginKeywords:"where class" +},t,u,e.C_LINE_COMMENT_MODE,e.C_BLOCK_COMMENT_MODE]},{beginKeywords:"namespace", 
+relevance:0,end:/[{;=]/,illegal:/[^\s:]/, +contains:[t,e.C_LINE_COMMENT_MODE,e.C_BLOCK_COMMENT_MODE]},{ +beginKeywords:"record",relevance:0,end:/[{;=]/,illegal:/[^\s:]/, +contains:[t,u,e.C_LINE_COMMENT_MODE,e.C_BLOCK_COMMENT_MODE]},{className:"meta", +begin:"^\\s*\\[(?=[\\w])",excludeBegin:!0,end:"\\]",excludeEnd:!0,contains:[{ +className:"string",begin:/"/,end:/"/}]},{ +beginKeywords:"new return throw await else",relevance:0},{className:"function", +begin:"("+b+"\\s+)+"+e.IDENT_RE+"\\s*(<[^=]+>\\s*)?\\(",returnBegin:!0, +end:/\s*[{;=]/,excludeEnd:!0,keywords:n,contains:[{ +beginKeywords:"public private protected static internal protected abstract async extern override unsafe virtual new sealed partial", +relevance:0},{begin:e.IDENT_RE+"\\s*(<[^=]+>\\s*)?\\(",returnBegin:!0, +contains:[e.TITLE_MODE,u],relevance:0},{match:/\(\)/},{className:"params", +begin:/\(/,end:/\)/,excludeBegin:!0,excludeEnd:!0,keywords:n,relevance:0, +contains:[g,a,e.C_BLOCK_COMMENT_MODE] +},e.C_LINE_COMMENT_MODE,e.C_BLOCK_COMMENT_MODE]},m]}},grmr_css:e=>{ +const n=e.regex,t=ie(e),a=[e.APOS_STRING_MODE,e.QUOTE_STRING_MODE];return{ +name:"CSS",case_insensitive:!0,illegal:/[=|'\$]/,keywords:{ +keyframePosition:"from to"},classNameAliases:{keyframePosition:"selector-tag"}, +contains:[t.BLOCK_COMMENT,{begin:/-(webkit|moz|ms|o)-(?=[a-z])/ +},t.CSS_NUMBER_MODE,{className:"selector-id",begin:/#[A-Za-z0-9_-]+/,relevance:0 +},{className:"selector-class",begin:"\\.[a-zA-Z-][a-zA-Z0-9_-]*",relevance:0 +},t.ATTRIBUTE_SELECTOR_MODE,{className:"selector-pseudo",variants:[{ +begin:":("+oe.join("|")+")"},{begin:":(:)?("+le.join("|")+")"}] +},t.CSS_VARIABLE,{className:"attribute",begin:"\\b("+ce.join("|")+")\\b"},{ +begin:/:/,end:/[;}{]/, +contains:[t.BLOCK_COMMENT,t.HEXCOLOR,t.IMPORTANT,t.CSS_NUMBER_MODE,...a,{ +begin:/(url|data-uri)\(/,end:/\)/,relevance:0,keywords:{built_in:"url data-uri" +},contains:[...a,{className:"string",begin:/[^)]/,endsWithParent:!0, +excludeEnd:!0}]},t.FUNCTION_DISPATCH]},{begin:n.lookahead(/@/),end:"[{;]", +relevance:0,illegal:/:/,contains:[{className:"keyword",begin:/@-?\w[\w]*(-\w+)*/ +},{begin:/\s/,endsWithParent:!0,excludeEnd:!0,relevance:0,keywords:{ +$pattern:/[a-z-]+/,keyword:"and or not only",attribute:se.join(" ")},contains:[{ +begin:/[a-z-]+(?=:)/,className:"attribute"},...a,t.CSS_NUMBER_MODE]}]},{ +className:"selector-tag",begin:"\\b("+re.join("|")+")\\b"}]}},grmr_diff:e=>{ +const n=e.regex;return{name:"Diff",aliases:["patch"],contains:[{ +className:"meta",relevance:10, +match:n.either(/^@@ +-\d+,\d+ +\+\d+,\d+ +@@/,/^\*\*\* +\d+,\d+ +\*\*\*\*$/,/^--- +\d+,\d+ +----$/) +},{className:"comment",variants:[{ +begin:n.either(/Index: /,/^index/,/={3,}/,/^-{3}/,/^\*{3} /,/^\+{3}/,/^diff --git/), +end:/$/},{match:/^\*{15}$/}]},{className:"addition",begin:/^\+/,end:/$/},{ +className:"deletion",begin:/^-/,end:/$/},{className:"addition",begin:/^!/, +end:/$/}]}},grmr_go:e=>{const n={ +keyword:["break","case","chan","const","continue","default","defer","else","fallthrough","for","func","go","goto","if","import","interface","map","package","range","return","select","struct","switch","type","var"], +type:["bool","byte","complex64","complex128","error","float32","float64","int8","int16","int32","int64","string","uint8","uint16","uint32","uint64","int","uint","uintptr","rune"], +literal:["true","false","iota","nil"], +built_in:["append","cap","close","complex","copy","imag","len","make","new","panic","print","println","real","recover","delete"] +};return{name:"Go",aliases:["golang"],keywords:n,illegal:"{const 
n=e.regex;return{name:"GraphQL",aliases:["gql"], +case_insensitive:!0,disableAutodetect:!1,keywords:{ +keyword:["query","mutation","subscription","type","input","schema","directive","interface","union","scalar","fragment","enum","on"], +literal:["true","false","null"]}, +contains:[e.HASH_COMMENT_MODE,e.QUOTE_STRING_MODE,e.NUMBER_MODE,{ +scope:"punctuation",match:/[.]{3}/,relevance:0},{scope:"punctuation", +begin:/[\!\(\)\:\=\[\]\{\|\}]{1}/,relevance:0},{scope:"variable",begin:/\$/, +end:/\W/,excludeEnd:!0,relevance:0},{scope:"meta",match:/@\w+/,excludeEnd:!0},{ +scope:"symbol",begin:n.concat(/[_A-Za-z][_0-9A-Za-z]*/,n.lookahead(/\s*:/)), +relevance:0}],illegal:[/[;<']/,/BEGIN/]}},grmr_ini:e=>{const n=e.regex,t={ +className:"number",relevance:0,variants:[{begin:/([+-]+)?[\d]+_[\d_]+/},{ +begin:e.NUMBER_RE}]},a=e.COMMENT();a.variants=[{begin:/;/,end:/$/},{begin:/#/, +end:/$/}];const i={className:"variable",variants:[{begin:/\$[\w\d"][\w\d_]*/},{ +begin:/\$\{(.*?)\}/}]},r={className:"literal", +begin:/\bon|off|true|false|yes|no\b/},s={className:"string", +contains:[e.BACKSLASH_ESCAPE],variants:[{begin:"'''",end:"'''",relevance:10},{ +begin:'"""',end:'"""',relevance:10},{begin:'"',end:'"'},{begin:"'",end:"'"}] +},o={begin:/\[/,end:/\]/,contains:[a,r,i,s,t,"self"],relevance:0 +},l=n.either(/[A-Za-z0-9_-]+/,/"(\\"|[^"])*"/,/'[^']*'/);return{ +name:"TOML, also INI",aliases:["toml"],case_insensitive:!0,illegal:/\S/, +contains:[a,{className:"section",begin:/\[+/,end:/\]+/},{ +begin:n.concat(l,"(\\s*\\.\\s*",l,")*",n.lookahead(/\s*=\s*[^#\s]/)), +className:"attr",starts:{end:/$/,contains:[a,o,r,i,s,t]}}]}},grmr_java:e=>{ +const n=e.regex,t="[\xc0-\u02b8a-zA-Z_$][\xc0-\u02b8a-zA-Z_$0-9]*",a=t+pe("(?:<"+t+"~~~(?:\\s*,\\s*"+t+"~~~)*>)?",/~~~/g,2),i={ +keyword:["synchronized","abstract","private","var","static","if","const ","for","while","strictfp","finally","protected","import","native","final","void","enum","else","break","transient","catch","instanceof","volatile","case","assert","package","default","public","try","switch","continue","throws","protected","public","private","module","requires","exports","do","sealed","yield","permits"], +literal:["false","true","null"], +type:["char","boolean","long","float","int","byte","short","double"], +built_in:["super","this"]},r={className:"meta",begin:"@"+t,contains:[{ +begin:/\(/,end:/\)/,contains:["self"]}]},s={className:"params",begin:/\(/, +end:/\)/,keywords:i,relevance:0,contains:[e.C_BLOCK_COMMENT_MODE],endsParent:!0} +;return{name:"Java",aliases:["jsp"],keywords:i,illegal:/<\/|#/, +contains:[e.COMMENT("/\\*\\*","\\*/",{relevance:0,contains:[{begin:/\w+@/, +relevance:0},{className:"doctag",begin:"@[A-Za-z]+"}]}),{ +begin:/import java\.[a-z]+\./,keywords:"import",relevance:2 +},e.C_LINE_COMMENT_MODE,e.C_BLOCK_COMMENT_MODE,{begin:/"""/,end:/"""/, +className:"string",contains:[e.BACKSLASH_ESCAPE] +},e.APOS_STRING_MODE,e.QUOTE_STRING_MODE,{ +match:[/\b(?:class|interface|enum|extends|implements|new)/,/\s+/,t],className:{ +1:"keyword",3:"title.class"}},{match:/non-sealed/,scope:"keyword"},{ +begin:[n.concat(/(?!else)/,t),/\s+/,t,/\s+/,/=(?!=)/],className:{1:"type", +3:"variable",5:"operator"}},{begin:[/record/,/\s+/,t],className:{1:"keyword", +3:"title.class"},contains:[s,e.C_LINE_COMMENT_MODE,e.C_BLOCK_COMMENT_MODE]},{ +beginKeywords:"new throw return else",relevance:0},{ +begin:["(?:"+a+"\\s+)",e.UNDERSCORE_IDENT_RE,/\s*(?=\()/],className:{ +2:"title.function"},keywords:i,contains:[{className:"params",begin:/\(/, +end:/\)/,keywords:i,relevance:0, 
+contains:[r,e.APOS_STRING_MODE,e.QUOTE_STRING_MODE,me,e.C_BLOCK_COMMENT_MODE] +},e.C_LINE_COMMENT_MODE,e.C_BLOCK_COMMENT_MODE]},me,r]}},grmr_javascript:Oe, +grmr_json:e=>{const n=["true","false","null"],t={scope:"literal", +beginKeywords:n.join(" ")};return{name:"JSON",keywords:{literal:n},contains:[{ +className:"attr",begin:/"(\\.|[^\\"\r\n])*"(?=\s*:)/,relevance:1.01},{ +match:/[{}[\],:]/,className:"punctuation",relevance:0 +},e.QUOTE_STRING_MODE,t,e.C_NUMBER_MODE,e.C_LINE_COMMENT_MODE,e.C_BLOCK_COMMENT_MODE], +illegal:"\\S"}},grmr_kotlin:e=>{const n={ +keyword:"abstract as val var vararg get set class object open private protected public noinline crossinline dynamic final enum if else do while for when throw try catch finally import package is in fun override companion reified inline lateinit init interface annotation data sealed internal infix operator out by constructor super tailrec where const inner suspend typealias external expect actual", +built_in:"Byte Short Char Int Long Boolean Float Double Void Unit Nothing", +literal:"true false null"},t={className:"symbol",begin:e.UNDERSCORE_IDENT_RE+"@" +},a={className:"subst",begin:/\$\{/,end:/\}/,contains:[e.C_NUMBER_MODE]},i={ +className:"variable",begin:"\\$"+e.UNDERSCORE_IDENT_RE},r={className:"string", +variants:[{begin:'"""',end:'"""(?=[^"])',contains:[i,a]},{begin:"'",end:"'", +illegal:/\n/,contains:[e.BACKSLASH_ESCAPE]},{begin:'"',end:'"',illegal:/\n/, +contains:[e.BACKSLASH_ESCAPE,i,a]}]};a.contains.push(r);const s={ +className:"meta", +begin:"@(?:file|property|field|get|set|receiver|param|setparam|delegate)\\s*:(?:\\s*"+e.UNDERSCORE_IDENT_RE+")?" +},o={className:"meta",begin:"@"+e.UNDERSCORE_IDENT_RE,contains:[{begin:/\(/, +end:/\)/,contains:[e.inherit(r,{className:"string"}),"self"]}] +},l=me,c=e.COMMENT("/\\*","\\*/",{contains:[e.C_BLOCK_COMMENT_MODE]}),d={ +variants:[{className:"type",begin:e.UNDERSCORE_IDENT_RE},{begin:/\(/,end:/\)/, +contains:[]}]},g=d;return g.variants[1].contains=[d],d.variants[1].contains=[g], +{name:"Kotlin",aliases:["kt","kts"],keywords:n, +contains:[e.COMMENT("/\\*\\*","\\*/",{relevance:0,contains:[{className:"doctag", +begin:"@[A-Za-z]+"}]}),e.C_LINE_COMMENT_MODE,c,{className:"keyword", +begin:/\b(break|continue|return|this)\b/,starts:{contains:[{className:"symbol", +begin:/@\w+/}]}},t,s,o,{className:"function",beginKeywords:"fun",end:"[(]|$", +returnBegin:!0,excludeEnd:!0,keywords:n,relevance:5,contains:[{ +begin:e.UNDERSCORE_IDENT_RE+"\\s*\\(",returnBegin:!0,relevance:0, +contains:[e.UNDERSCORE_TITLE_MODE]},{className:"type",begin://, +keywords:"reified",relevance:0},{className:"params",begin:/\(/,end:/\)/, +endsParent:!0,keywords:n,relevance:0,contains:[{begin:/:/,end:/[=,\/]/, +endsWithParent:!0,contains:[d,e.C_LINE_COMMENT_MODE,c],relevance:0 +},e.C_LINE_COMMENT_MODE,c,s,o,r,e.C_NUMBER_MODE]},c]},{ +begin:[/class|interface|trait/,/\s+/,e.UNDERSCORE_IDENT_RE],beginScope:{ +3:"title.class"},keywords:"class interface trait",end:/[:\{(]|$/,excludeEnd:!0, +illegal:"extends implements",contains:[{ +beginKeywords:"public protected internal private constructor" +},e.UNDERSCORE_TITLE_MODE,{className:"type",begin://,excludeBegin:!0, +excludeEnd:!0,relevance:0},{className:"type",begin:/[,:]\s*/,end:/[<\(,){\s]|$/, +excludeBegin:!0,returnEnd:!0},s,o]},r,{className:"meta",begin:"^#!/usr/bin/env", +end:"$",illegal:"\n"},l]}},grmr_less:e=>{ +const n=ie(e),t=de,a="[\\w-]+",i="("+a+"|@\\{"+a+"\\})",r=[],s=[],o=e=>({ +className:"string",begin:"~?"+e+".*?"+e}),l=(e,n,t)=>({className:e,begin:n, 
+relevance:t}),c={$pattern:/[a-z-]+/,keyword:"and or not only", +attribute:se.join(" ")},d={begin:"\\(",end:"\\)",contains:s,keywords:c, +relevance:0} +;s.push(e.C_LINE_COMMENT_MODE,e.C_BLOCK_COMMENT_MODE,o("'"),o('"'),n.CSS_NUMBER_MODE,{ +begin:"(url|data-uri)\\(",starts:{className:"string",end:"[\\)\\n]", +excludeEnd:!0} +},n.HEXCOLOR,d,l("variable","@@?"+a,10),l("variable","@\\{"+a+"\\}"),l("built_in","~?`[^`]*?`"),{ +className:"attribute",begin:a+"\\s*:",end:":",returnBegin:!0,excludeEnd:!0 +},n.IMPORTANT,{beginKeywords:"and not"},n.FUNCTION_DISPATCH);const g=s.concat({ +begin:/\{/,end:/\}/,contains:r}),u={beginKeywords:"when",endsWithParent:!0, +contains:[{beginKeywords:"and not"}].concat(s)},b={begin:i+"\\s*:", +returnBegin:!0,end:/[;}]/,relevance:0,contains:[{begin:/-(webkit|moz|ms|o)-/ +},n.CSS_VARIABLE,{className:"attribute",begin:"\\b("+ce.join("|")+")\\b", +end:/(?=:)/,starts:{endsWithParent:!0,illegal:"[<=$]",relevance:0,contains:s}}] +},m={className:"keyword", +begin:"@(import|media|charset|font-face|(-[a-z]+-)?keyframes|supports|document|namespace|page|viewport|host)\\b", +starts:{end:"[;{}]",keywords:c,returnEnd:!0,contains:s,relevance:0}},p={ +className:"variable",variants:[{begin:"@"+a+"\\s*:",relevance:15},{begin:"@"+a +}],starts:{end:"[;}]",returnEnd:!0,contains:g}},_={variants:[{ +begin:"[\\.#:&\\[>]",end:"[;{}]"},{begin:i,end:/\{/}],returnBegin:!0, +returnEnd:!0,illegal:"[<='$\"]",relevance:0, +contains:[e.C_LINE_COMMENT_MODE,e.C_BLOCK_COMMENT_MODE,u,l("keyword","all\\b"),l("variable","@\\{"+a+"\\}"),{ +begin:"\\b("+re.join("|")+")\\b",className:"selector-tag" +},n.CSS_NUMBER_MODE,l("selector-tag",i,0),l("selector-id","#"+i),l("selector-class","\\."+i,0),l("selector-tag","&",0),n.ATTRIBUTE_SELECTOR_MODE,{ +className:"selector-pseudo",begin:":("+oe.join("|")+")"},{ +className:"selector-pseudo",begin:":(:)?("+le.join("|")+")"},{begin:/\(/, +end:/\)/,relevance:0,contains:g},{begin:"!important"},n.FUNCTION_DISPATCH]},h={ +begin:a+":(:)?"+`(${t.join("|")})`,returnBegin:!0,contains:[_]} +;return r.push(e.C_LINE_COMMENT_MODE,e.C_BLOCK_COMMENT_MODE,m,p,h,b,_,u,n.FUNCTION_DISPATCH), +{name:"Less",case_insensitive:!0,illegal:"[=>'/<($\"]",contains:r}}, +grmr_lua:e=>{const n="\\[=*\\[",t="\\]=*\\]",a={begin:n,end:t,contains:["self"] +},i=[e.COMMENT("--(?!"+n+")","$"),e.COMMENT("--"+n,t,{contains:[a],relevance:10 +})];return{name:"Lua",keywords:{$pattern:e.UNDERSCORE_IDENT_RE, +literal:"true false nil", +keyword:"and break do else elseif end for goto if in local not or repeat return then until while", +built_in:"_G _ENV _VERSION __index __newindex __mode __call __metatable __tostring __len __gc __add __sub __mul __div __mod __pow __concat __unm __eq __lt __le assert collectgarbage dofile error getfenv getmetatable ipairs load loadfile loadstring module next pairs pcall print rawequal rawget rawset require select setfenv setmetatable tonumber tostring type unpack xpcall arg self coroutine resume yield status wrap create running debug getupvalue debug sethook getmetatable gethook setmetatable setlocal traceback setfenv getinfo setupvalue getlocal getregistry getfenv io lines write close flush open output type read stderr stdin input stdout popen tmpfile math log max acos huge ldexp pi cos tanh pow deg tan cosh sinh random randomseed frexp ceil floor rad abs sqrt modf asin min mod fmod log10 atan2 exp sin atan os exit setlocale date getenv difftime remove time clock tmpname rename execute package preload loadlib loaded loaders cpath config path seeall string sub upper len gfind rep 
find match char dump gmatch reverse byte format gsub lower table setn insert getn foreachi maxn foreach concat sort remove" +},contains:i.concat([{className:"function",beginKeywords:"function",end:"\\)", +contains:[e.inherit(e.TITLE_MODE,{ +begin:"([_a-zA-Z]\\w*\\.)*([_a-zA-Z]\\w*:)?[_a-zA-Z]\\w*"}),{className:"params", +begin:"\\(",endsWithParent:!0,contains:i}].concat(i) +},e.C_NUMBER_MODE,e.APOS_STRING_MODE,e.QUOTE_STRING_MODE,{className:"string", +begin:n,end:t,contains:[a],relevance:5}])}},grmr_makefile:e=>{const n={ +className:"variable",variants:[{begin:"\\$\\("+e.UNDERSCORE_IDENT_RE+"\\)", +contains:[e.BACKSLASH_ESCAPE]},{begin:/\$[@%{ +const n={begin:/<\/?[A-Za-z_]/,end:">",subLanguage:"xml",relevance:0},t={ +variants:[{begin:/\[.+?\]\[.*?\]/,relevance:0},{ +begin:/\[.+?\]\(((data|javascript|mailto):|(?:http|ftp)s?:\/\/).*?\)/, +relevance:2},{ +begin:e.regex.concat(/\[.+?\]\(/,/[A-Za-z][A-Za-z0-9+.-]*/,/:\/\/.*?\)/), +relevance:2},{begin:/\[.+?\]\([./?&#].*?\)/,relevance:1},{ +begin:/\[.*?\]\(.*?\)/,relevance:0}],returnBegin:!0,contains:[{match:/\[(?=\])/ +},{className:"string",relevance:0,begin:"\\[",end:"\\]",excludeBegin:!0, +returnEnd:!0},{className:"link",relevance:0,begin:"\\]\\(",end:"\\)", +excludeBegin:!0,excludeEnd:!0},{className:"symbol",relevance:0,begin:"\\]\\[", +end:"\\]",excludeBegin:!0,excludeEnd:!0}]},a={className:"strong",contains:[], +variants:[{begin:/_{2}(?!\s)/,end:/_{2}/},{begin:/\*{2}(?!\s)/,end:/\*{2}/}] +},i={className:"emphasis",contains:[],variants:[{begin:/\*(?![*\s])/,end:/\*/},{ +begin:/_(?![_\s])/,end:/_/,relevance:0}]},r=e.inherit(a,{contains:[] +}),s=e.inherit(i,{contains:[]});a.contains.push(s),i.contains.push(r) +;let o=[n,t];return[a,i,r,s].forEach((e=>{e.contains=e.contains.concat(o) +})),o=o.concat(a,i),{name:"Markdown",aliases:["md","mkdown","mkd"],contains:[{ +className:"section",variants:[{begin:"^#{1,6}",end:"$",contains:o},{ +begin:"(?=^.+?\\n[=-]{2,}$)",contains:[{begin:"^[=-]*$"},{begin:"^",end:"\\n", +contains:o}]}]},n,{className:"bullet",begin:"^[ \t]*([*+-]|(\\d+\\.))(?=\\s+)", +end:"\\s+",excludeEnd:!0},a,i,{className:"quote",begin:"^>\\s+",contains:o, +end:"$"},{className:"code",variants:[{begin:"(`{3,})[^`](.|\\n)*?\\1`*[ ]*"},{ +begin:"(~{3,})[^~](.|\\n)*?\\1~*[ ]*"},{begin:"```",end:"```+[ ]*$"},{ +begin:"~~~",end:"~~~+[ ]*$"},{begin:"`.+?`"},{begin:"(?=^( {4}|\\t))", +contains:[{begin:"^( {4}|\\t)",end:"(\\n)$"}],relevance:0}]},{ +begin:"^[-\\*]{3,}",end:"$"},t,{begin:/^\[[^\n]+\]:/,returnBegin:!0,contains:[{ +className:"symbol",begin:/\[/,end:/\]/,excludeBegin:!0,excludeEnd:!0},{ +className:"link",begin:/:\s*/,end:/$/,excludeBegin:!0}]}]}},grmr_objectivec:e=>{ +const n=/[a-zA-Z@][a-zA-Z0-9_]*/,t={$pattern:n, +keyword:["@interface","@class","@protocol","@implementation"]};return{ +name:"Objective-C",aliases:["mm","objc","obj-c","obj-c++","objective-c++"], +keywords:{"variable.language":["this","super"],$pattern:n, 
+keyword:["while","export","sizeof","typedef","const","struct","for","union","volatile","static","mutable","if","do","return","goto","enum","else","break","extern","asm","case","default","register","explicit","typename","switch","continue","inline","readonly","assign","readwrite","self","@synchronized","id","typeof","nonatomic","IBOutlet","IBAction","strong","weak","copy","in","out","inout","bycopy","byref","oneway","__strong","__weak","__block","__autoreleasing","@private","@protected","@public","@try","@property","@end","@throw","@catch","@finally","@autoreleasepool","@synthesize","@dynamic","@selector","@optional","@required","@encode","@package","@import","@defs","@compatibility_alias","__bridge","__bridge_transfer","__bridge_retained","__bridge_retain","__covariant","__contravariant","__kindof","_Nonnull","_Nullable","_Null_unspecified","__FUNCTION__","__PRETTY_FUNCTION__","__attribute__","getter","setter","retain","unsafe_unretained","nonnull","nullable","null_unspecified","null_resettable","class","instancetype","NS_DESIGNATED_INITIALIZER","NS_UNAVAILABLE","NS_REQUIRES_SUPER","NS_RETURNS_INNER_POINTER","NS_INLINE","NS_AVAILABLE","NS_DEPRECATED","NS_ENUM","NS_OPTIONS","NS_SWIFT_UNAVAILABLE","NS_ASSUME_NONNULL_BEGIN","NS_ASSUME_NONNULL_END","NS_REFINED_FOR_SWIFT","NS_SWIFT_NAME","NS_SWIFT_NOTHROW","NS_DURING","NS_HANDLER","NS_ENDHANDLER","NS_VALUERETURN","NS_VOIDRETURN"], +literal:["false","true","FALSE","TRUE","nil","YES","NO","NULL"], +built_in:["dispatch_once_t","dispatch_queue_t","dispatch_sync","dispatch_async","dispatch_once"], +type:["int","float","char","unsigned","signed","short","long","double","wchar_t","unichar","void","bool","BOOL","id|0","_Bool"] +},illegal:"/,end:/$/,illegal:"\\n" +},e.C_LINE_COMMENT_MODE,e.C_BLOCK_COMMENT_MODE]},{className:"class", +begin:"("+t.keyword.join("|")+")\\b",end:/(\{|$)/,excludeEnd:!0,keywords:t, +contains:[e.UNDERSCORE_TITLE_MODE]},{begin:"\\."+e.UNDERSCORE_IDENT_RE, +relevance:0}]}},grmr_perl:e=>{const n=e.regex,t=/[dualxmsipngr]{0,12}/,a={ +$pattern:/[\w.]+/, +keyword:"abs accept alarm and atan2 bind binmode bless break caller chdir chmod chomp chop chown chr chroot close closedir connect continue cos crypt dbmclose dbmopen defined delete die do dump each else elsif endgrent endhostent endnetent endprotoent endpwent endservent eof eval exec exists exit exp fcntl fileno flock for foreach fork format formline getc getgrent getgrgid getgrnam gethostbyaddr gethostbyname gethostent getlogin getnetbyaddr getnetbyname getnetent getpeername getpgrp getpriority getprotobyname getprotobynumber getprotoent getpwent getpwnam getpwuid getservbyname getservbyport getservent getsockname getsockopt given glob gmtime goto grep gt hex if index int ioctl join keys kill last lc lcfirst length link listen local localtime log lstat lt ma map mkdir msgctl msgget msgrcv msgsnd my ne next no not oct open opendir or ord our pack package pipe pop pos print printf prototype push q|0 qq quotemeta qw qx rand read readdir readline readlink readpipe recv redo ref rename require reset return reverse rewinddir rindex rmdir say scalar seek seekdir select semctl semget semop send setgrent sethostent setnetent setpgrp setpriority setprotoent setpwent setservent setsockopt shift shmctl shmget shmread shmwrite shutdown sin sleep socket socketpair sort splice split sprintf sqrt srand stat state study sub substr symlink syscall sysopen sysread sysseek system syswrite tell telldir tie tied time times tr truncate uc ucfirst umask undef unless unlink unpack unshift untie until use 
utime values vec wait waitpid wantarray warn when while write x|0 xor y|0" +},i={className:"subst",begin:"[$@]\\{",end:"\\}",keywords:a},r={begin:/->\{/, +end:/\}/},s={variants:[{begin:/\$\d/},{ +begin:n.concat(/[$%@](\^\w\b|#\w+(::\w+)*|\{\w+\}|\w+(::\w*)*)/,"(?![A-Za-z])(?![@$%])") +},{begin:/[$%@][^\s\w{]/,relevance:0}] +},o=[e.BACKSLASH_ESCAPE,i,s],l=[/!/,/\//,/\|/,/\?/,/'/,/"/,/#/],c=(e,a,i="\\1")=>{ +const r="\\1"===i?i:n.concat(i,a) +;return n.concat(n.concat("(?:",e,")"),a,/(?:\\.|[^\\\/])*?/,r,/(?:\\.|[^\\\/])*?/,i,t) +},d=(e,a,i)=>n.concat(n.concat("(?:",e,")"),a,/(?:\\.|[^\\\/])*?/,i,t),g=[s,e.HASH_COMMENT_MODE,e.COMMENT(/^=\w/,/=cut/,{ +endsWithParent:!0}),r,{className:"string",contains:o,variants:[{ +begin:"q[qwxr]?\\s*\\(",end:"\\)",relevance:5},{begin:"q[qwxr]?\\s*\\[", +end:"\\]",relevance:5},{begin:"q[qwxr]?\\s*\\{",end:"\\}",relevance:5},{ +begin:"q[qwxr]?\\s*\\|",end:"\\|",relevance:5},{begin:"q[qwxr]?\\s*<",end:">", +relevance:5},{begin:"qw\\s+q",end:"q",relevance:5},{begin:"'",end:"'", +contains:[e.BACKSLASH_ESCAPE]},{begin:'"',end:'"'},{begin:"`",end:"`", +contains:[e.BACKSLASH_ESCAPE]},{begin:/\{\w+\}/,relevance:0},{ +begin:"-?\\w+\\s*=>",relevance:0}]},{className:"number", +begin:"(\\b0[0-7_]+)|(\\b0x[0-9a-fA-F_]+)|(\\b[1-9][0-9_]*(\\.[0-9_]+)?)|[0_]\\b", +relevance:0},{ +begin:"(\\/\\/|"+e.RE_STARTERS_RE+"|\\b(split|return|print|reverse|grep)\\b)\\s*", +keywords:"split return print reverse grep",relevance:0, +contains:[e.HASH_COMMENT_MODE,{className:"regexp",variants:[{ +begin:c("s|tr|y",n.either(...l,{capture:!0}))},{begin:c("s|tr|y","\\(","\\)")},{ +begin:c("s|tr|y","\\[","\\]")},{begin:c("s|tr|y","\\{","\\}")}],relevance:2},{ +className:"regexp",variants:[{begin:/(m|qr)\/\//,relevance:0},{ +begin:d("(?:m|qr)?",/\//,/\//)},{begin:d("m|qr",n.either(...l,{capture:!0 +}),/\1/)},{begin:d("m|qr",/\(/,/\)/)},{begin:d("m|qr",/\[/,/\]/)},{ +begin:d("m|qr",/\{/,/\}/)}]}]},{className:"function",beginKeywords:"sub", +end:"(\\s*\\(.*?\\))?[;{]",excludeEnd:!0,relevance:5,contains:[e.TITLE_MODE]},{ +begin:"-\\w\\b",relevance:0},{begin:"^__DATA__$",end:"^__END__$", +subLanguage:"mojolicious",contains:[{begin:"^@@.*",end:"$",className:"comment"}] +}];return i.contains=g,r.contains=g,{name:"Perl",aliases:["pl","pm"],keywords:a, +contains:g}},grmr_php:e=>{ +const n=e.regex,t=/(?![A-Za-z0-9])(?![$])/,a=n.concat(/[a-zA-Z_\x7f-\xff][a-zA-Z0-9_\x7f-\xff]*/,t),i=n.concat(/(\\?[A-Z][a-z0-9_\x7f-\xff]+|\\?[A-Z]+(?=[A-Z][a-z0-9_\x7f-\xff])){1,}/,t),r={ +scope:"variable",match:"\\$+"+a},s={scope:"subst",variants:[{begin:/\$\w+/},{ +begin:/\{\$/,end:/\}/}]},o=e.inherit(e.APOS_STRING_MODE,{illegal:null +}),l="[ \t\n]",c={scope:"string",variants:[e.inherit(e.QUOTE_STRING_MODE,{ +illegal:null,contains:e.QUOTE_STRING_MODE.contains.concat(s)}),o,{ +begin:/<<<[ \t]*(?:(\w+)|"(\w+)")\n/,end:/[ \t]*(\w+)\b/, +contains:e.QUOTE_STRING_MODE.contains.concat(s),"on:begin":(e,n)=>{ +n.data._beginMatch=e[1]||e[2]},"on:end":(e,n)=>{ +n.data._beginMatch!==e[1]&&n.ignoreMatch()}},e.END_SAME_AS_BEGIN({ +begin:/<<<[ \t]*'(\w+)'\n/,end:/[ \t]*(\w+)\b/})]},d={scope:"number",variants:[{ +begin:"\\b0[bB][01]+(?:_[01]+)*\\b"},{begin:"\\b0[oO][0-7]+(?:_[0-7]+)*\\b"},{ +begin:"\\b0[xX][\\da-fA-F]+(?:_[\\da-fA-F]+)*\\b"},{ +begin:"(?:\\b\\d+(?:_\\d+)*(\\.(?:\\d+(?:_\\d+)*))?|\\B\\.\\d+)(?:[eE][+-]?\\d+)?" 
+}],relevance:0 +},g=["false","null","true"],u=["__CLASS__","__DIR__","__FILE__","__FUNCTION__","__COMPILER_HALT_OFFSET__","__LINE__","__METHOD__","__NAMESPACE__","__TRAIT__","die","echo","exit","include","include_once","print","require","require_once","array","abstract","and","as","binary","bool","boolean","break","callable","case","catch","class","clone","const","continue","declare","default","do","double","else","elseif","empty","enddeclare","endfor","endforeach","endif","endswitch","endwhile","enum","eval","extends","final","finally","float","for","foreach","from","global","goto","if","implements","instanceof","insteadof","int","integer","interface","isset","iterable","list","match|0","mixed","new","never","object","or","private","protected","public","readonly","real","return","string","switch","throw","trait","try","unset","use","var","void","while","xor","yield"],b=["Error|0","AppendIterator","ArgumentCountError","ArithmeticError","ArrayIterator","ArrayObject","AssertionError","BadFunctionCallException","BadMethodCallException","CachingIterator","CallbackFilterIterator","CompileError","Countable","DirectoryIterator","DivisionByZeroError","DomainException","EmptyIterator","ErrorException","Exception","FilesystemIterator","FilterIterator","GlobIterator","InfiniteIterator","InvalidArgumentException","IteratorIterator","LengthException","LimitIterator","LogicException","MultipleIterator","NoRewindIterator","OutOfBoundsException","OutOfRangeException","OuterIterator","OverflowException","ParentIterator","ParseError","RangeException","RecursiveArrayIterator","RecursiveCachingIterator","RecursiveCallbackFilterIterator","RecursiveDirectoryIterator","RecursiveFilterIterator","RecursiveIterator","RecursiveIteratorIterator","RecursiveRegexIterator","RecursiveTreeIterator","RegexIterator","RuntimeException","SeekableIterator","SplDoublyLinkedList","SplFileInfo","SplFileObject","SplFixedArray","SplHeap","SplMaxHeap","SplMinHeap","SplObjectStorage","SplObserver","SplPriorityQueue","SplQueue","SplStack","SplSubject","SplTempFileObject","TypeError","UnderflowException","UnexpectedValueException","UnhandledMatchError","ArrayAccess","BackedEnum","Closure","Fiber","Generator","Iterator","IteratorAggregate","Serializable","Stringable","Throwable","Traversable","UnitEnum","WeakReference","WeakMap","Directory","__PHP_Incomplete_Class","parent","php_user_filter","self","static","stdClass"],m={ +keyword:u,literal:(e=>{const n=[];return e.forEach((e=>{ +n.push(e),e.toLowerCase()===e?n.push(e.toUpperCase()):n.push(e.toLowerCase()) +})),n})(g),built_in:b},p=e=>e.map((e=>e.replace(/\|\d+$/,""))),_={variants:[{ +match:[/new/,n.concat(l,"+"),n.concat("(?!",p(b).join("\\b|"),"\\b)"),i],scope:{ +1:"keyword",4:"title.class"}}]},h=n.concat(a,"\\b(?!\\()"),f={variants:[{ +match:[n.concat(/::/,n.lookahead(/(?!class\b)/)),h],scope:{2:"variable.constant" +}},{match:[/::/,/class/],scope:{2:"variable.language"}},{ +match:[i,n.concat(/::/,n.lookahead(/(?!class\b)/)),h],scope:{1:"title.class", +3:"variable.constant"}},{match:[i,n.concat("::",n.lookahead(/(?!class\b)/))], +scope:{1:"title.class"}},{match:[i,/::/,/class/],scope:{1:"title.class", +3:"variable.language"}}]},E={scope:"attr", +match:n.concat(a,n.lookahead(":"),n.lookahead(/(?!::)/))},y={relevance:0, +begin:/\(/,end:/\)/,keywords:m,contains:[E,r,f,e.C_BLOCK_COMMENT_MODE,c,d,_] +},N={relevance:0, +match:[/\b/,n.concat("(?!fn\\b|function\\b|",p(u).join("\\b|"),"|",p(b).join("\\b|"),"\\b)"),a,n.concat(l,"*"),n.lookahead(/(?=\()/)], 
+scope:{3:"title.function.invoke"},contains:[y]};y.contains.push(N) +;const w=[E,f,e.C_BLOCK_COMMENT_MODE,c,d,_];return{case_insensitive:!1, +keywords:m,contains:[{begin:n.concat(/#\[\s*/,i),beginScope:"meta",end:/]/, +endScope:"meta",keywords:{literal:g,keyword:["new","array"]},contains:[{ +begin:/\[/,end:/]/,keywords:{literal:g,keyword:["new","array"]}, +contains:["self",...w]},...w,{scope:"meta",match:i}] +},e.HASH_COMMENT_MODE,e.COMMENT("//","$"),e.COMMENT("/\\*","\\*/",{contains:[{ +scope:"doctag",match:"@[A-Za-z]+"}]}),{match:/__halt_compiler\(\);/, +keywords:"__halt_compiler",starts:{scope:"comment",end:e.MATCH_NOTHING_RE, +contains:[{match:/\?>/,scope:"meta",endsParent:!0}]}},{scope:"meta",variants:[{ +begin:/<\?php/,relevance:10},{begin:/<\?=/},{begin:/<\?/,relevance:.1},{ +begin:/\?>/}]},{scope:"variable.language",match:/\$this\b/},r,N,f,{ +match:[/const/,/\s/,a],scope:{1:"keyword",3:"variable.constant"}},_,{ +scope:"function",relevance:0,beginKeywords:"fn function",end:/[;{]/, +excludeEnd:!0,illegal:"[$%\\[]",contains:[{beginKeywords:"use" +},e.UNDERSCORE_TITLE_MODE,{begin:"=>",endsParent:!0},{scope:"params", +begin:"\\(",end:"\\)",excludeBegin:!0,excludeEnd:!0,keywords:m, +contains:["self",r,f,e.C_BLOCK_COMMENT_MODE,c,d]}]},{scope:"class",variants:[{ +beginKeywords:"enum",illegal:/[($"]/},{beginKeywords:"class interface trait", +illegal:/[:($"]/}],relevance:0,end:/\{/,excludeEnd:!0,contains:[{ +beginKeywords:"extends implements"},e.UNDERSCORE_TITLE_MODE]},{ +beginKeywords:"namespace",relevance:0,end:";",illegal:/[.']/, +contains:[e.inherit(e.UNDERSCORE_TITLE_MODE,{scope:"title.class"})]},{ +beginKeywords:"use",relevance:0,end:";",contains:[{ +match:/\b(as|const|function)\b/,scope:"keyword"},e.UNDERSCORE_TITLE_MODE]},c,d]} +},grmr_php_template:e=>({name:"PHP template",subLanguage:"xml",contains:[{ +begin:/<\?(php|=)?/,end:/\?>/,subLanguage:"php",contains:[{begin:"/\\*", +end:"\\*/",skip:!0},{begin:'b"',end:'"',skip:!0},{begin:"b'",end:"'",skip:!0 +},e.inherit(e.APOS_STRING_MODE,{illegal:null,className:null,contains:null, +skip:!0}),e.inherit(e.QUOTE_STRING_MODE,{illegal:null,className:null, +contains:null,skip:!0})]}]}),grmr_plaintext:e=>({name:"Plain text", +aliases:["text","txt"],disableAutodetect:!0}),grmr_python:e=>{ +const n=e.regex,t=/[\p{XID_Start}_]\p{XID_Continue}*/u,a=["and","as","assert","async","await","break","case","class","continue","def","del","elif","else","except","finally","for","from","global","if","import","in","is","lambda","match","nonlocal|10","not","or","pass","raise","return","try","while","with","yield"],i={ +$pattern:/[A-Za-z]\w+|__\w+__/,keyword:a, +built_in:["__import__","abs","all","any","ascii","bin","bool","breakpoint","bytearray","bytes","callable","chr","classmethod","compile","complex","delattr","dict","dir","divmod","enumerate","eval","exec","filter","float","format","frozenset","getattr","globals","hasattr","hash","help","hex","id","input","int","isinstance","issubclass","iter","len","list","locals","map","max","memoryview","min","next","object","oct","open","ord","pow","print","property","range","repr","reversed","round","set","setattr","slice","sorted","staticmethod","str","sum","super","tuple","type","vars","zip"], +literal:["__debug__","Ellipsis","False","None","NotImplemented","True"], +type:["Any","Callable","Coroutine","Dict","List","Literal","Generic","Optional","Sequence","Set","Tuple","Type","Union"] +},r={className:"meta",begin:/^(>>>|\.\.\.) 
/},s={className:"subst",begin:/\{/, +end:/\}/,keywords:i,illegal:/#/},o={begin:/\{\{/,relevance:0},l={ +className:"string",contains:[e.BACKSLASH_ESCAPE],variants:[{ +begin:/([uU]|[bB]|[rR]|[bB][rR]|[rR][bB])?'''/,end:/'''/, +contains:[e.BACKSLASH_ESCAPE,r],relevance:10},{ +begin:/([uU]|[bB]|[rR]|[bB][rR]|[rR][bB])?"""/,end:/"""/, +contains:[e.BACKSLASH_ESCAPE,r],relevance:10},{ +begin:/([fF][rR]|[rR][fF]|[fF])'''/,end:/'''/, +contains:[e.BACKSLASH_ESCAPE,r,o,s]},{begin:/([fF][rR]|[rR][fF]|[fF])"""/, +end:/"""/,contains:[e.BACKSLASH_ESCAPE,r,o,s]},{begin:/([uU]|[rR])'/,end:/'/, +relevance:10},{begin:/([uU]|[rR])"/,end:/"/,relevance:10},{ +begin:/([bB]|[bB][rR]|[rR][bB])'/,end:/'/},{begin:/([bB]|[bB][rR]|[rR][bB])"/, +end:/"/},{begin:/([fF][rR]|[rR][fF]|[fF])'/,end:/'/, +contains:[e.BACKSLASH_ESCAPE,o,s]},{begin:/([fF][rR]|[rR][fF]|[fF])"/,end:/"/, +contains:[e.BACKSLASH_ESCAPE,o,s]},e.APOS_STRING_MODE,e.QUOTE_STRING_MODE] +},c="[0-9](_?[0-9])*",d=`(\\b(${c}))?\\.(${c})|\\b(${c})\\.`,g="\\b|"+a.join("|"),u={ +className:"number",relevance:0,variants:[{ +begin:`(\\b(${c})|(${d}))[eE][+-]?(${c})[jJ]?(?=${g})`},{begin:`(${d})[jJ]?`},{ +begin:`\\b([1-9](_?[0-9])*|0+(_?0)*)[lLjJ]?(?=${g})`},{ +begin:`\\b0[bB](_?[01])+[lL]?(?=${g})`},{begin:`\\b0[oO](_?[0-7])+[lL]?(?=${g})` +},{begin:`\\b0[xX](_?[0-9a-fA-F])+[lL]?(?=${g})`},{begin:`\\b(${c})[jJ](?=${g})` +}]},b={className:"comment",begin:n.lookahead(/# type:/),end:/$/,keywords:i, +contains:[{begin:/# type:/},{begin:/#/,end:/\b\B/,endsWithParent:!0}]},m={ +className:"params",variants:[{className:"",begin:/\(\s*\)/,skip:!0},{begin:/\(/, +end:/\)/,excludeBegin:!0,excludeEnd:!0,keywords:i, +contains:["self",r,u,l,e.HASH_COMMENT_MODE]}]};return s.contains=[l,u,r],{ +name:"Python",aliases:["py","gyp","ipython"],unicodeRegex:!0,keywords:i, +illegal:/(<\/|\?)|=>/,contains:[r,u,{begin:/\bself\b/},{beginKeywords:"if", +relevance:0},l,b,e.HASH_COMMENT_MODE,{match:[/\bdef/,/\s+/,t],scope:{ +1:"keyword",3:"title.function"},contains:[m]},{variants:[{ +match:[/\bclass/,/\s+/,t,/\s*/,/\(\s*/,t,/\s*\)/]},{match:[/\bclass/,/\s+/,t]}], +scope:{1:"keyword",3:"title.class",6:"title.class.inherited"}},{ +className:"meta",begin:/^[\t ]*@/,end:/(?=#)|$/,contains:[u,m,l]}]}}, +grmr_python_repl:e=>({aliases:["pycon"],contains:[{className:"meta.prompt", +starts:{end:/ |$/,starts:{end:"$",subLanguage:"python"}},variants:[{ +begin:/^>>>(?=[ ]|$)/},{begin:/^\.\.\.(?=[ ]|$)/}]}]}),grmr_r:e=>{ +const n=e.regex,t=/(?:(?:[a-zA-Z]|\.[._a-zA-Z])[._a-zA-Z0-9]*)|\.(?!\d)/,a=n.either(/0[xX][0-9a-fA-F]+\.[0-9a-fA-F]*[pP][+-]?\d+i?/,/0[xX][0-9a-fA-F]+(?:[pP][+-]?\d+)?[Li]?/,/(?:\d+(?:\.\d*)?|\.\d+)(?:[eE][+-]?\d+)?[Li]?/),i=/[=!<>:]=|\|\||&&|:::?|<-|<<-|->>|->|\|>|[-+*\/?!$&|:<=>@^~]|\*\*/,r=n.either(/[()]/,/[{}]/,/\[\[/,/[[\]]/,/\\/,/,/) +;return{name:"R",keywords:{$pattern:t, +keyword:"function if in break next repeat else for while", +literal:"NULL NA TRUE FALSE Inf NaN NA_integer_|10 NA_real_|10 NA_character_|10 NA_complex_|10", +built_in:"LETTERS letters month.abb month.name pi T F abs acos acosh all any anyNA Arg as.call as.character as.complex as.double as.environment as.integer as.logical as.null.default as.numeric as.raw asin asinh atan atanh attr attributes baseenv browser c call ceiling class Conj cos cosh cospi cummax cummin cumprod cumsum digamma dim dimnames emptyenv exp expression floor forceAndCall gamma gc.time globalenv Im interactive invisible is.array is.atomic is.call is.character is.complex is.double is.environment is.expression is.finite is.function is.infinite 
is.integer is.language is.list is.logical is.matrix is.na is.name is.nan is.null is.numeric is.object is.pairlist is.raw is.recursive is.single is.symbol lazyLoadDBfetch length lgamma list log max min missing Mod names nargs nzchar oldClass on.exit pos.to.env proc.time prod quote range Re rep retracemem return round seq_along seq_len seq.int sign signif sin sinh sinpi sqrt standardGeneric substitute sum switch tan tanh tanpi tracemem trigamma trunc unclass untracemem UseMethod xtfrm" +},contains:[e.COMMENT(/#'/,/$/,{contains:[{scope:"doctag",match:/@examples/, +starts:{end:n.lookahead(n.either(/\n^#'\s*(?=@[a-zA-Z]+)/,/\n^(?!#')/)), +endsParent:!0}},{scope:"doctag",begin:"@param",end:/$/,contains:[{ +scope:"variable",variants:[{match:t},{match:/`(?:\\.|[^`\\])+`/}],endsParent:!0 +}]},{scope:"doctag",match:/@[a-zA-Z]+/},{scope:"keyword",match:/\\[a-zA-Z]+/}] +}),e.HASH_COMMENT_MODE,{scope:"string",contains:[e.BACKSLASH_ESCAPE], +variants:[e.END_SAME_AS_BEGIN({begin:/[rR]"(-*)\(/,end:/\)(-*)"/ +}),e.END_SAME_AS_BEGIN({begin:/[rR]"(-*)\{/,end:/\}(-*)"/ +}),e.END_SAME_AS_BEGIN({begin:/[rR]"(-*)\[/,end:/\](-*)"/ +}),e.END_SAME_AS_BEGIN({begin:/[rR]'(-*)\(/,end:/\)(-*)'/ +}),e.END_SAME_AS_BEGIN({begin:/[rR]'(-*)\{/,end:/\}(-*)'/ +}),e.END_SAME_AS_BEGIN({begin:/[rR]'(-*)\[/,end:/\](-*)'/}),{begin:'"',end:'"', +relevance:0},{begin:"'",end:"'",relevance:0}]},{relevance:0,variants:[{scope:{ +1:"operator",2:"number"},match:[i,a]},{scope:{1:"operator",2:"number"}, +match:[/%[^%]*%/,a]},{scope:{1:"punctuation",2:"number"},match:[r,a]},{scope:{ +2:"number"},match:[/[^a-zA-Z0-9._]|^/,a]}]},{scope:{3:"operator"}, +match:[t,/\s+/,/<-/,/\s+/]},{scope:"operator",relevance:0,variants:[{match:i},{ +match:/%[^%]*%/}]},{scope:"punctuation",relevance:0,match:r},{begin:"`",end:"`", +contains:[{begin:/\\./}]}]}},grmr_ruby:e=>{ +const n=e.regex,t="([a-zA-Z_]\\w*[!?=]?|[-+~]@|<<|>>|=~|===?|<=>|[<>]=?|\\*\\*|[-/+%^&*~`|]|\\[\\]=?)",a=n.either(/\b([A-Z]+[a-z0-9]+)+/,/\b([A-Z]+[a-z0-9]+)+[A-Z]+/),i=n.concat(a,/(::\w+)*/),r={ +"variable.constant":["__FILE__","__LINE__","__ENCODING__"], +"variable.language":["self","super"], +keyword:["alias","and","begin","BEGIN","break","case","class","defined","do","else","elsif","end","END","ensure","for","if","in","module","next","not","or","redo","require","rescue","retry","return","then","undef","unless","until","when","while","yield","include","extend","prepend","public","private","protected","raise","throw"], +built_in:["proc","lambda","attr_accessor","attr_reader","attr_writer","define_method","private_constant","module_function"], +literal:["true","false","nil"]},s={className:"doctag",begin:"@[A-Za-z]+"},o={ +begin:"#<",end:">"},l=[e.COMMENT("#","$",{contains:[s] +}),e.COMMENT("^=begin","^=end",{contains:[s],relevance:10 +}),e.COMMENT("^__END__",e.MATCH_NOTHING_RE)],c={className:"subst",begin:/#\{/, +end:/\}/,keywords:r},d={className:"string",contains:[e.BACKSLASH_ESCAPE,c], +variants:[{begin:/'/,end:/'/},{begin:/"/,end:/"/},{begin:/`/,end:/`/},{ +begin:/%[qQwWx]?\(/,end:/\)/},{begin:/%[qQwWx]?\[/,end:/\]/},{ +begin:/%[qQwWx]?\{/,end:/\}/},{begin:/%[qQwWx]?/},{begin:/%[qQwWx]?\//, +end:/\//},{begin:/%[qQwWx]?%/,end:/%/},{begin:/%[qQwWx]?-/,end:/-/},{ +begin:/%[qQwWx]?\|/,end:/\|/},{begin:/\B\?(\\\d{1,3})/},{ +begin:/\B\?(\\x[A-Fa-f0-9]{1,2})/},{begin:/\B\?(\\u\{?[A-Fa-f0-9]{1,6}\}?)/},{ +begin:/\B\?(\\M-\\C-|\\M-\\c|\\c\\M-|\\M-|\\C-\\M-)[\x20-\x7e]/},{ +begin:/\B\?\\(c|C-)[\x20-\x7e]/},{begin:/\B\?\\?\S/},{ 
+begin:n.concat(/<<[-~]?'?/,n.lookahead(/(\w+)(?=\W)[^\n]*\n(?:[^\n]*\n)*?\s*\1\b/)), +contains:[e.END_SAME_AS_BEGIN({begin:/(\w+)/,end:/(\w+)/, +contains:[e.BACKSLASH_ESCAPE,c]})]}]},g="[0-9](_?[0-9])*",u={className:"number", +relevance:0,variants:[{ +begin:`\\b([1-9](_?[0-9])*|0)(\\.(${g}))?([eE][+-]?(${g})|r)?i?\\b`},{ +begin:"\\b0[dD][0-9](_?[0-9])*r?i?\\b"},{begin:"\\b0[bB][0-1](_?[0-1])*r?i?\\b" +},{begin:"\\b0[oO][0-7](_?[0-7])*r?i?\\b"},{ +begin:"\\b0[xX][0-9a-fA-F](_?[0-9a-fA-F])*r?i?\\b"},{ +begin:"\\b0(_?[0-7])+r?i?\\b"}]},b={variants:[{match:/\(\)/},{ +className:"params",begin:/\(/,end:/(?=\))/,excludeBegin:!0,endsParent:!0, +keywords:r}]},m=[d,{variants:[{match:[/class\s+/,i,/\s+<\s+/,i]},{ +match:[/\b(class|module)\s+/,i]}],scope:{2:"title.class", +4:"title.class.inherited"},keywords:r},{match:[/(include|extend)\s+/,i],scope:{ +2:"title.class"},keywords:r},{relevance:0,match:[i,/\.new[. (]/],scope:{ +1:"title.class"}},{relevance:0,match:/\b[A-Z][A-Z_0-9]+\b/, +className:"variable.constant"},{relevance:0,match:a,scope:"title.class"},{ +match:[/def/,/\s+/,t],scope:{1:"keyword",3:"title.function"},contains:[b]},{ +begin:e.IDENT_RE+"::"},{className:"symbol", +begin:e.UNDERSCORE_IDENT_RE+"(!|\\?)?:",relevance:0},{className:"symbol", +begin:":(?!\\s)",contains:[d,{begin:t}],relevance:0},u,{className:"variable", +begin:"(\\$\\W)|((\\$|@@?)(\\w+))(?=[^@$?])(?![A-Za-z])(?![@$?'])"},{ +className:"params",begin:/\|/,end:/\|/,excludeBegin:!0,excludeEnd:!0, +relevance:0,keywords:r},{begin:"("+e.RE_STARTERS_RE+"|unless)\\s*", +keywords:"unless",contains:[{className:"regexp",contains:[e.BACKSLASH_ESCAPE,c], +illegal:/\n/,variants:[{begin:"/",end:"/[a-z]*"},{begin:/%r\{/,end:/\}[a-z]*/},{ +begin:"%r\\(",end:"\\)[a-z]*"},{begin:"%r!",end:"![a-z]*"},{begin:"%r\\[", +end:"\\][a-z]*"}]}].concat(o,l),relevance:0}].concat(o,l) +;c.contains=m,b.contains=m;const p=[{begin:/^\s*=>/,starts:{end:"$",contains:m} +},{className:"meta.prompt", +begin:"^([>?]>|[\\w#]+\\(\\w+\\):\\d+:\\d+[>*]|(\\w+-)?\\d+\\.\\d+\\.\\d+(p\\d+)?[^\\d][^>]+>)(?=[ ])", +starts:{end:"$",keywords:r,contains:m}}];return l.unshift(o),{name:"Ruby", +aliases:["rb","gemspec","podspec","thor","irb"],keywords:r,illegal:/\/\*/, +contains:[e.SHEBANG({binary:"ruby"})].concat(p).concat(l).concat(m)}}, +grmr_rust:e=>{const n=e.regex,t={className:"title.function.invoke",relevance:0, +begin:n.concat(/\b/,/(?!let|for|while|if|else|match\b)/,e.IDENT_RE,n.lookahead(/\s*\(/)) +},a="([ui](8|16|32|64|128|size)|f(32|64))?",i=["drop ","Copy","Send","Sized","Sync","Drop","Fn","FnMut","FnOnce","ToOwned","Clone","Debug","PartialEq","PartialOrd","Eq","Ord","AsRef","AsMut","Into","From","Default","Iterator","Extend","IntoIterator","DoubleEndedIterator","ExactSizeIterator","SliceConcatExt","ToString","assert!","assert_eq!","bitflags!","bytes!","cfg!","col!","concat!","concat_idents!","debug_assert!","debug_assert_eq!","env!","eprintln!","panic!","file!","format!","format_args!","include_bytes!","include_str!","line!","local_data_key!","module_path!","option_env!","print!","println!","select!","stringify!","try!","unimplemented!","unreachable!","vec!","write!","writeln!","macro_rules!","assert_ne!","debug_assert_ne!"],r=["i8","i16","i32","i64","i128","isize","u8","u16","u32","u64","u128","usize","f32","f64","str","char","bool","Box","Option","Result","String","Vec"] +;return{name:"Rust",aliases:["rs"],keywords:{$pattern:e.IDENT_RE+"!?",type:r, 
+keyword:["abstract","as","async","await","become","box","break","const","continue","crate","do","dyn","else","enum","extern","false","final","fn","for","if","impl","in","let","loop","macro","match","mod","move","mut","override","priv","pub","ref","return","self","Self","static","struct","super","trait","true","try","type","typeof","unsafe","unsized","use","virtual","where","while","yield"], +literal:["true","false","Some","None","Ok","Err"],built_in:i},illegal:""},t]}}, +grmr_scss:e=>{const n=ie(e),t=le,a=oe,i="@[a-z-]+",r={className:"variable", +begin:"(\\$[a-zA-Z-][a-zA-Z0-9_-]*)\\b",relevance:0};return{name:"SCSS", +case_insensitive:!0,illegal:"[=/|']", +contains:[e.C_LINE_COMMENT_MODE,e.C_BLOCK_COMMENT_MODE,n.CSS_NUMBER_MODE,{ +className:"selector-id",begin:"#[A-Za-z0-9_-]+",relevance:0},{ +className:"selector-class",begin:"\\.[A-Za-z0-9_-]+",relevance:0 +},n.ATTRIBUTE_SELECTOR_MODE,{className:"selector-tag", +begin:"\\b("+re.join("|")+")\\b",relevance:0},{className:"selector-pseudo", +begin:":("+a.join("|")+")"},{className:"selector-pseudo", +begin:":(:)?("+t.join("|")+")"},r,{begin:/\(/,end:/\)/, +contains:[n.CSS_NUMBER_MODE]},n.CSS_VARIABLE,{className:"attribute", +begin:"\\b("+ce.join("|")+")\\b"},{ +begin:"\\b(whitespace|wait|w-resize|visible|vertical-text|vertical-ideographic|uppercase|upper-roman|upper-alpha|underline|transparent|top|thin|thick|text|text-top|text-bottom|tb-rl|table-header-group|table-footer-group|sw-resize|super|strict|static|square|solid|small-caps|separate|se-resize|scroll|s-resize|rtl|row-resize|ridge|right|repeat|repeat-y|repeat-x|relative|progress|pointer|overline|outside|outset|oblique|nowrap|not-allowed|normal|none|nw-resize|no-repeat|no-drop|newspaper|ne-resize|n-resize|move|middle|medium|ltr|lr-tb|lowercase|lower-roman|lower-alpha|loose|list-item|line|line-through|line-edge|lighter|left|keep-all|justify|italic|inter-word|inter-ideograph|inside|inset|inline|inline-block|inherit|inactive|ideograph-space|ideograph-parenthesis|ideograph-numeric|ideograph-alpha|horizontal|hidden|help|hand|groove|fixed|ellipsis|e-resize|double|dotted|distribute|distribute-space|distribute-letter|distribute-all-lines|disc|disabled|default|decimal|dashed|crosshair|collapse|col-resize|circle|char|center|capitalize|break-word|break-all|bottom|both|bolder|bold|block|bidi-override|below|baseline|auto|always|all-scroll|absolute|table|table-cell)\\b" +},{begin:/:/,end:/[;}{]/,relevance:0, +contains:[n.BLOCK_COMMENT,r,n.HEXCOLOR,n.CSS_NUMBER_MODE,e.QUOTE_STRING_MODE,e.APOS_STRING_MODE,n.IMPORTANT,n.FUNCTION_DISPATCH] +},{begin:"@(page|font-face)",keywords:{$pattern:i,keyword:"@page @font-face"}},{ +begin:"@",end:"[{;]",returnBegin:!0,keywords:{$pattern:/[a-z-]+/, +keyword:"and or not only",attribute:se.join(" ")},contains:[{begin:i, +className:"keyword"},{begin:/[a-z-]+(?=:)/,className:"attribute" +},r,e.QUOTE_STRING_MODE,e.APOS_STRING_MODE,n.HEXCOLOR,n.CSS_NUMBER_MODE] +},n.FUNCTION_DISPATCH]}},grmr_shell:e=>({name:"Shell Session", +aliases:["console","shellsession"],contains:[{className:"meta.prompt", +begin:/^\s{0,3}[/~\w\d[\]()@-]*[>%$#][ ]?/,starts:{end:/[^\\](?=\s*$)/, +subLanguage:"bash"}}]}),grmr_sparql:e=>{const n=xe(e).exports,t={ +className:"variable",begin:"[?$]"+e.IDENT_RE,relevance:0};return{ +case_insensitive:!0,name:"SPARQL",keywords:{ +keyword:"base|10 prefix|10 @base|10 @prefix|10 add all as|0 ask bind by|0 clear construct|10 copymove create data default define delete describe distinct drop exists filter from|0 graph|10 group having in|0 insert limit load minus 
named|10 not offset optional order reduced select|0 service silent to union using values where with|0", +function:"abs asc avg bound ceil coalesce concat containsstrbefore count dayhours desc encode_for_uri floor group_concat if|0 iri isblank isiri isliteral isnumeric isuri langdatatype langmatches lcase max md5 min|0 minutes month now rand regex replace round sameterm sample seconds separator sha1 sha256 sha384 sha512 str strafter strdt strends strlang strlen strstarts struuid substr sum then timezone tz ucase uribnode uuid year", +literal:"true|0 false|0",built_in:"a|0"},aliases:["sparql","rql","rq","ru"], +contains:[n.LANGTAG,n.DATATYPE,n.IRI_LITERAL,n.BLANK_NODE,n.PNAME,t,{ +begin:/"""\s*\{/,end:/"""/,subLanguage:"json",excludeBegin:!0,excludeEnd:!0, +relevance:0},{begin:/'''\s*\{/,end:/'''/,subLanguage:"json",excludeBegin:!0, +excludeEnd:!0,relevance:0 +},n.TRIPLE_QUOTE_STRING,n.TRIPLE_APOS_STRING,n.QUOTE_STRING_LITERAL,n.APOS_STRING_LITERAL,n.NUMBER,e.HASH_COMMENT_MODE] +}},grmr_sql:e=>{ +const n=e.regex,t=e.COMMENT("--","$"),a=["true","false","unknown"],i=["bigint","binary","blob","boolean","char","character","clob","date","dec","decfloat","decimal","float","int","integer","interval","nchar","nclob","national","numeric","real","row","smallint","time","timestamp","varchar","varying","varbinary"],r=["abs","acos","array_agg","asin","atan","avg","cast","ceil","ceiling","coalesce","corr","cos","cosh","count","covar_pop","covar_samp","cume_dist","dense_rank","deref","element","exp","extract","first_value","floor","json_array","json_arrayagg","json_exists","json_object","json_objectagg","json_query","json_table","json_table_primitive","json_value","lag","last_value","lead","listagg","ln","log","log10","lower","max","min","mod","nth_value","ntile","nullif","percent_rank","percentile_cont","percentile_disc","position","position_regex","power","rank","regr_avgx","regr_avgy","regr_count","regr_intercept","regr_r2","regr_slope","regr_sxx","regr_sxy","regr_syy","row_number","sin","sinh","sqrt","stddev_pop","stddev_samp","substring","substring_regex","sum","tan","tanh","translate","translate_regex","treat","trim","trim_array","unnest","upper","value_of","var_pop","var_samp","width_bucket"],s=["create table","insert into","primary key","foreign key","not null","alter table","add constraint","grouping sets","on overflow","character set","respect nulls","ignore nulls","nulls first","nulls last","depth first","breadth 
first"],o=r,l=["abs","acos","all","allocate","alter","and","any","are","array","array_agg","array_max_cardinality","as","asensitive","asin","asymmetric","at","atan","atomic","authorization","avg","begin","begin_frame","begin_partition","between","bigint","binary","blob","boolean","both","by","call","called","cardinality","cascaded","case","cast","ceil","ceiling","char","char_length","character","character_length","check","classifier","clob","close","coalesce","collate","collect","column","commit","condition","connect","constraint","contains","convert","copy","corr","corresponding","cos","cosh","count","covar_pop","covar_samp","create","cross","cube","cume_dist","current","current_catalog","current_date","current_default_transform_group","current_path","current_role","current_row","current_schema","current_time","current_timestamp","current_path","current_role","current_transform_group_for_type","current_user","cursor","cycle","date","day","deallocate","dec","decimal","decfloat","declare","default","define","delete","dense_rank","deref","describe","deterministic","disconnect","distinct","double","drop","dynamic","each","element","else","empty","end","end_frame","end_partition","end-exec","equals","escape","every","except","exec","execute","exists","exp","external","extract","false","fetch","filter","first_value","float","floor","for","foreign","frame_row","free","from","full","function","fusion","get","global","grant","group","grouping","groups","having","hold","hour","identity","in","indicator","initial","inner","inout","insensitive","insert","int","integer","intersect","intersection","interval","into","is","join","json_array","json_arrayagg","json_exists","json_object","json_objectagg","json_query","json_table","json_table_primitive","json_value","lag","language","large","last_value","lateral","lead","leading","left","like","like_regex","listagg","ln","local","localtime","localtimestamp","log","log10","lower","match","match_number","match_recognize","matches","max","member","merge","method","min","minute","mod","modifies","module","month","multiset","national","natural","nchar","nclob","new","no","none","normalize","not","nth_value","ntile","null","nullif","numeric","octet_length","occurrences_regex","of","offset","old","omit","on","one","only","open","or","order","out","outer","over","overlaps","overlay","parameter","partition","pattern","per","percent","percent_rank","percentile_cont","percentile_disc","period","portion","position","position_regex","power","precedes","precision","prepare","primary","procedure","ptf","range","rank","reads","real","recursive","ref","references","referencing","regr_avgx","regr_avgy","regr_count","regr_intercept","regr_r2","regr_slope","regr_sxx","regr_sxy","regr_syy","release","result","return","returns","revoke","right","rollback","rollup","row","row_number","rows","running","savepoint","scope","scroll","search","second","seek","select","sensitive","session_user","set","show","similar","sin","sinh","skip","smallint","some","specific","specifictype","sql","sqlexception","sqlstate","sqlwarning","sqrt","start","static","stddev_pop","stddev_samp","submultiset","subset","substring","substring_regex","succeeds","sum","symmetric","system","system_time","system_user","table","tablesample","tan","tanh","then","time","timestamp","timezone_hour","timezone_minute","to","trailing","translate","translate_regex","translation","treat","trigger","trim","trim_array","true","truncate","uescape","union","unique","unknown","unnest","update","upper","user","using","value","valu
es","value_of","var_pop","var_samp","varbinary","varchar","varying","versioning","when","whenever","where","width_bucket","window","with","within","without","year","add","asc","collation","desc","final","first","last","view"].filter((e=>!r.includes(e))),c={ +begin:n.concat(/\b/,n.either(...o),/\s*\(/),relevance:0,keywords:{built_in:o}} +;return{name:"SQL",case_insensitive:!0,illegal:/[{}]|<\//,keywords:{ +$pattern:/\b[\w\.]+/,keyword:((e,{exceptions:n,when:t}={})=>{const a=t +;return n=n||[],e.map((e=>e.match(/\|\d+$/)||n.includes(e)?e:a(e)?e+"|0":e)) +})(l,{when:e=>e.length<3}),literal:a,type:i, +built_in:["current_catalog","current_date","current_default_transform_group","current_path","current_role","current_schema","current_transform_group_for_type","current_user","session_user","system_time","system_user","current_time","localtime","current_timestamp","localtimestamp"] +},contains:[{begin:n.either(...s),relevance:0,keywords:{$pattern:/[\w\.]+/, +keyword:l.concat(s),literal:a,type:i}},{className:"type", +begin:n.either("double precision","large object","with timezone","without timezone") +},c,{className:"variable",begin:/@[a-z0-9][a-z0-9_]*/},{className:"string", +variants:[{begin:/'/,end:/'/,contains:[{begin:/''/}]}]},{begin:/"/,end:/"/, +contains:[{begin:/""/}]},e.C_NUMBER_MODE,e.C_BLOCK_COMMENT_MODE,t,{ +className:"operator",begin:/[-+*/=%^~]|&&?|\|\|?|!=?|<(?:=>?|<|>)?|>[>=]?/, +relevance:0}]}},grmr_swift:e=>{const n={match:/\s+/,relevance:0 +},t=e.COMMENT("/\\*","\\*/",{contains:["self"]}),a=[e.C_LINE_COMMENT_MODE,t],i={ +match:[/\./,m(...Me,...Se)],className:{2:"keyword"}},r={match:b(/\./,m(...Ce)), +relevance:0},s=Ce.filter((e=>"string"==typeof e)).concat(["_|0"]),o={variants:[{ +className:"keyword", +match:m(...Ce.filter((e=>"string"!=typeof e)).concat(Ae).map(ke),...Se)}]},l={ +$pattern:m(/\b\w+/,/#\w+/),keyword:s.concat(Ie),literal:Te},c=[i,r,o],g=[{ +match:b(/\./,m(...De)),relevance:0},{className:"built_in", +match:b(/\b/,m(...De),/(?=\()/)}],u={match:/->/,relevance:0},p=[u,{ +className:"operator",relevance:0,variants:[{match:$e},{match:`\\.(\\.|${Be})+`}] +}],_="([0-9]_*)+",h="([0-9a-fA-F]_*)+",f={className:"number",relevance:0, +variants:[{match:`\\b(${_})(\\.(${_}))?([eE][+-]?(${_}))?\\b`},{ +match:`\\b0x(${h})(\\.(${h}))?([pP][+-]?(${_}))?\\b`},{match:/\b0o([0-7]_*)+\b/ +},{match:/\b0b([01]_*)+\b/}]},E=(e="")=>({className:"subst",variants:[{ +match:b(/\\/,e,/[0\\tnr"']/)},{match:b(/\\/,e,/u\{[0-9a-fA-F]{1,8}\}/)}] +}),y=(e="")=>({className:"subst",match:b(/\\/,e,/[\t ]*(?:[\r\n]|\r\n)/) +}),N=(e="")=>({className:"subst",label:"interpol",begin:b(/\\/,e,/\(/),end:/\)/ +}),w=(e="")=>({begin:b(e,/"""/),end:b(/"""/,e),contains:[E(e),y(e),N(e)] +}),v=(e="")=>({begin:b(e,/"/),end:b(/"/,e),contains:[E(e),N(e)]}),O={ +className:"string", +variants:[w(),w("#"),w("##"),w("###"),v(),v("#"),v("##"),v("###")] +},x=[e.BACKSLASH_ESCAPE,{begin:/\[/,end:/\]/,relevance:0, +contains:[e.BACKSLASH_ESCAPE]}],k={begin:/\/[^\s](?=[^/\n]*\/)/,end:/\//, +contains:x},M=e=>{const n=b(e,/\//),t=b(/\//,e);return{begin:n,end:t, +contains:[...x,{scope:"comment",begin:`#(?!.*${t})`,end:/$/}]}},S={ +scope:"regexp",variants:[M("###"),M("##"),M("#"),k]},A={match:b(/`/,Ue,/`/) +},C=[A,{className:"variable",match:/\$\d+/},{className:"variable", +match:`\\$${Fe}+`}],T=[{match:/(@|#(un)?)available/,scope:"keyword",starts:{ +contains:[{begin:/\(/,end:/\)/,keywords:Ke,contains:[...p,f,O]}]}},{ +scope:"keyword",match:b(/@/,m(...je))},{scope:"meta",match:b(/@/,Ue)}],R={ 
+match:d(/\b[A-Z]/),relevance:0,contains:[{className:"type", +match:b(/(AV|CA|CF|CG|CI|CL|CM|CN|CT|MK|MP|MTK|MTL|NS|SCN|SK|UI|WK|XC)/,Fe,"+") +},{className:"type",match:Pe,relevance:0},{match:/[?!]+/,relevance:0},{ +match:/\.\.\./,relevance:0},{match:b(/\s+&\s+/,d(Pe)),relevance:0}]},I={ +begin://,keywords:l,contains:[...a,...c,...T,u,R]};R.contains.push(I) +;const D={begin:/\(/,end:/\)/,relevance:0,keywords:l,contains:["self",{ +match:b(Ue,/\s*:/),keywords:"_|0",relevance:0 +},...a,S,...c,...g,...p,f,O,...C,...T,R]},L={begin://, +keywords:"repeat each",contains:[...a,R]},B={begin:/\(/,end:/\)/,keywords:l, +contains:[{begin:m(d(b(Ue,/\s*:/)),d(b(Ue,/\s+/,Ue,/\s*:/))),end:/:/, +relevance:0,contains:[{className:"keyword",match:/\b_\b/},{className:"params", +match:Ue}]},...a,...c,...p,f,O,...T,R,D],endsParent:!0,illegal:/["']/},$={ +match:[/(func|macro)/,/\s+/,m(A.match,Ue,$e)],className:{1:"keyword", +3:"title.function"},contains:[L,B,n],illegal:[/\[/,/%/]},z={ +match:[/\b(?:subscript|init[?!]?)/,/\s*(?=[<(])/],className:{1:"keyword"}, +contains:[L,B,n],illegal:/\[|%/},F={match:[/operator/,/\s+/,$e],className:{ +1:"keyword",3:"title"}},U={begin:[/precedencegroup/,/\s+/,Pe],className:{ +1:"keyword",3:"title"},contains:[R],keywords:[...Re,...Te],end:/}/} +;for(const e of O.variants){const n=e.contains.find((e=>"interpol"===e.label)) +;n.keywords=l;const t=[...c,...g,...p,f,O,...C];n.contains=[...t,{begin:/\(/, +end:/\)/,contains:["self",...t]}]}return{name:"Swift",keywords:l, +contains:[...a,$,z,{beginKeywords:"struct protocol class extension enum actor", +end:"\\{",excludeEnd:!0,keywords:l,contains:[e.inherit(e.TITLE_MODE,{ +className:"title.class",begin:/[A-Za-z$_][\u00C0-\u02B80-9A-Za-z$_]*/}),...c] +},F,U,{beginKeywords:"import",end:/$/,contains:[...a],relevance:0 +},S,...c,...g,...p,f,O,...C,...T,R,D]}},grmr_turtle:xe,grmr_typescript:e=>{ +const n=Oe(e),t=_e,a=["any","void","number","boolean","string","object","never","symbol","bigint","unknown"],i={ +beginKeywords:"namespace",end:/\{/,excludeEnd:!0, +contains:[n.exports.CLASS_REFERENCE]},r={beginKeywords:"interface",end:/\{/, +excludeEnd:!0,keywords:{keyword:"interface extends",built_in:a}, +contains:[n.exports.CLASS_REFERENCE]},s={$pattern:_e, +keyword:he.concat(["type","namespace","interface","public","private","protected","implements","declare","abstract","readonly","enum","override"]), +literal:fe,built_in:ve.concat(a),"variable.language":we},o={className:"meta", +begin:"@"+t},l=(e,n,t)=>{const a=e.contains.findIndex((e=>e.label===n)) +;if(-1===a)throw Error("can not find mode to replace");e.contains.splice(a,1,t)} +;return Object.assign(n.keywords,s), +n.exports.PARAMS_CONTAINS.push(o),n.contains=n.contains.concat([o,i,r]), +l(n,"shebang",e.SHEBANG()),l(n,"use_strict",{className:"meta",relevance:10, +begin:/^\s*['"]use strict['"]/ +}),n.contains.find((e=>"func.def"===e.label)).relevance=0,Object.assign(n,{ +name:"TypeScript",aliases:["ts","tsx","mts","cts"]}),n},grmr_vbnet:e=>{ +const n=e.regex,t=/\d{1,2}\/\d{1,2}\/\d{4}/,a=/\d{4}-\d{1,2}-\d{1,2}/,i=/(\d|1[012])(:\d+){0,2} *(AM|PM)/,r=/\d{1,2}(:\d{1,2}){1,2}/,s={ +className:"literal",variants:[{begin:n.concat(/# */,n.either(a,t),/ *#/)},{ +begin:n.concat(/# */,r,/ *#/)},{begin:n.concat(/# */,i,/ *#/)},{ +begin:n.concat(/# */,n.either(a,t),/ +/,n.either(i,r),/ *#/)}] +},o=e.COMMENT(/'''/,/$/,{contains:[{className:"doctag",begin:/<\/?/,end:/>/}] +}),l=e.COMMENT(null,/$/,{variants:[{begin:/'/},{begin:/([\t ]|^)REM(?=\s)/}]}) +;return{name:"Visual Basic 
.NET",aliases:["vb"],case_insensitive:!0, +classNameAliases:{label:"symbol"},keywords:{ +keyword:"addhandler alias aggregate ansi as async assembly auto binary by byref byval call case catch class compare const continue custom declare default delegate dim distinct do each equals else elseif end enum erase error event exit explicit finally for friend from function get global goto group handles if implements imports in inherits interface into iterator join key let lib loop me mid module mustinherit mustoverride mybase myclass namespace narrowing new next notinheritable notoverridable of off on operator option optional order overloads overridable overrides paramarray partial preserve private property protected public raiseevent readonly redim removehandler resume return select set shadows shared skip static step stop structure strict sub synclock take text then throw to try unicode until using when where while widening with withevents writeonly yield", +built_in:"addressof and andalso await directcast gettype getxmlnamespace is isfalse isnot istrue like mod nameof new not or orelse trycast typeof xor cbool cbyte cchar cdate cdbl cdec cint clng cobj csbyte cshort csng cstr cuint culng cushort", +type:"boolean byte char date decimal double integer long object sbyte short single string uinteger ulong ushort", +literal:"true false nothing"}, +illegal:"//|\\{|\\}|endif|gosub|variant|wend|^\\$ ",contains:[{ +className:"string",begin:/"(""|[^/n])"C\b/},{className:"string",begin:/"/, +end:/"/,illegal:/\n/,contains:[{begin:/""/}]},s,{className:"number",relevance:0, +variants:[{begin:/\b\d[\d_]*((\.[\d_]+(E[+-]?[\d_]+)?)|(E[+-]?[\d_]+))[RFD@!#]?/ +},{begin:/\b\d[\d_]*((U?[SIL])|[%&])?/},{begin:/&H[\dA-F_]+((U?[SIL])|[%&])?/},{ +begin:/&O[0-7_]+((U?[SIL])|[%&])?/},{begin:/&B[01_]+((U?[SIL])|[%&])?/}]},{ +className:"label",begin:/^\w+:/},o,l,{className:"meta", +begin:/[\t ]*#(const|disable|else|elseif|enable|end|externalsource|if|region)\b/, +end:/$/,keywords:{ +keyword:"const disable else elseif enable end externalsource if region then"}, +contains:[l]}]}},grmr_wasm:e=>{e.regex;const n=e.COMMENT(/\(;/,/;\)/) +;return n.contains.push("self"),{name:"WebAssembly",keywords:{$pattern:/[\w.]+/, +keyword:["anyfunc","block","br","br_if","br_table","call","call_indirect","data","drop","elem","else","end","export","func","global.get","global.set","local.get","local.set","local.tee","get_global","get_local","global","if","import","local","loop","memory","memory.grow","memory.size","module","mut","nop","offset","param","result","return","select","set_global","set_local","start","table","tee_local","then","type","unreachable"] +},contains:[e.COMMENT(/;;/,/$/),n,{match:[/(?:offset|align)/,/\s*/,/=/], +className:{1:"keyword",3:"operator"}},{className:"variable",begin:/\$[\w_]+/},{ +match:/(\((?!;)|\))+/,className:"punctuation",relevance:0},{ +begin:[/(?:func|call|call_indirect)/,/\s+/,/\$[^\s)]+/],className:{1:"keyword", +3:"title.function"}},e.QUOTE_STRING_MODE,{match:/(i32|i64|f32|f64)(?!\.)/, +className:"type"},{className:"keyword", +match:/\b(f32|f64|i32|i64)(?:\.(?:abs|add|and|ceil|clz|const|convert_[su]\/i(?:32|64)|copysign|ctz|demote\/f64|div(?:_[su])?|eqz?|extend_[su]\/i32|floor|ge(?:_[su])?|gt(?:_[su])?|le(?:_[su])?|load(?:(?:8|16|32)_[su])?|lt(?:_[su])?|max|min|mul|nearest|neg?|or|popcnt|promote\/f32|reinterpret\/[fi](?:32|64)|rem_[su]|rot[lr]|shl|shr_[su]|store(?:8|16|32)?|sqrt|sub|trunc(?:_[su]\/f(?:32|64))?|wrap\/i64|xor))\b/ +},{className:"number",relevance:0, 
+match:/[+-]?\b(?:\d(?:_?\d)*(?:\.\d(?:_?\d)*)?(?:[eE][+-]?\d(?:_?\d)*)?|0x[\da-fA-F](?:_?[\da-fA-F])*(?:\.[\da-fA-F](?:_?[\da-fA-D])*)?(?:[pP][+-]?\d(?:_?\d)*)?)\b|\binf\b|\bnan(?::0x[\da-fA-F](?:_?[\da-fA-D])*)?\b/ +}]}},grmr_xml:e=>{ +const n=e.regex,t=n.concat(/[\p{L}_]/u,n.optional(/[\p{L}0-9_.-]*:/u),/[\p{L}0-9_.-]*/u),a={ +className:"symbol",begin:/&[a-z]+;|&#[0-9]+;|&#x[a-f0-9]+;/},i={begin:/\s/, +contains:[{className:"keyword",begin:/#?[a-z_][a-z1-9_-]+/,illegal:/\n/}] +},r=e.inherit(i,{begin:/\(/,end:/\)/}),s=e.inherit(e.APOS_STRING_MODE,{ +className:"string"}),o=e.inherit(e.QUOTE_STRING_MODE,{className:"string"}),l={ +endsWithParent:!0,illegal:/`]+/}]}]}]};return{ +name:"HTML, XML", +aliases:["html","xhtml","rss","atom","xjb","xsd","xsl","plist","wsf","svg"], +case_insensitive:!0,unicodeRegex:!0,contains:[{className:"meta",begin://,relevance:10,contains:[i,o,s,r,{begin:/\[/,end:/\]/,contains:[{ +className:"meta",begin://,contains:[i,r,o,s]}]}] +},e.COMMENT(//,{relevance:10}),{begin://, +relevance:10},a,{className:"meta",end:/\?>/,variants:[{begin:/<\?xml/, +relevance:10,contains:[o]},{begin:/<\?[a-z][a-z0-9]+/}]},{className:"tag", +begin:/)/,end:/>/,keywords:{name:"style"},contains:[l],starts:{ +end:/<\/style>/,returnEnd:!0,subLanguage:["css","xml"]}},{className:"tag", +begin:/)/,end:/>/,keywords:{name:"script"},contains:[l],starts:{ +end:/<\/script>/,returnEnd:!0,subLanguage:["javascript","handlebars","xml"]}},{ +className:"tag",begin:/<>|<\/>/},{className:"tag", +begin:n.concat(//,/>/,/\s/)))), +end:/\/?>/,contains:[{className:"name",begin:t,relevance:0,starts:l}]},{ +className:"tag",begin:n.concat(/<\//,n.lookahead(n.concat(t,/>/))),contains:[{ +className:"name",begin:t,relevance:0},{begin:/>/,relevance:0,endsParent:!0}]}]} +},grmr_yaml:e=>{ +const n="true false yes no null",t="[\\w#;/?:@&=+$,.~*'()[\\]]+",a={ +className:"string",relevance:0,variants:[{begin:/'/,end:/'/},{begin:/"/,end:/"/ +},{begin:/\S+/}],contains:[e.BACKSLASH_ESCAPE,{className:"template-variable", +variants:[{begin:/\{\{/,end:/\}\}/},{begin:/%\{/,end:/\}/}]}]},i=e.inherit(a,{ +variants:[{begin:/'/,end:/'/},{begin:/"/,end:/"/},{begin:/[^\s,{}[\]]+/}]}),r={ +end:",",endsWithParent:!0,excludeEnd:!0,keywords:n,relevance:0},s={begin:/\{/, +end:/\}/,contains:[r],illegal:"\\n",relevance:0},o={begin:"\\[",end:"\\]", +contains:[r],illegal:"\\n",relevance:0},l=[{className:"attr",variants:[{ +begin:"\\w[\\w :\\/.-]*:(?=[ \t]|$)"},{begin:'"\\w[\\w :\\/.-]*":(?=[ \t]|$)'},{ +begin:"'\\w[\\w :\\/.-]*':(?=[ \t]|$)"}]},{className:"meta",begin:"^---\\s*$", +relevance:10},{className:"string", +begin:"[\\|>]([1-9]?[+-])?[ ]*\\n( +)[^ ][^\\n]*\\n(\\2[^\\n]+\\n?)*"},{ +begin:"<%[%=-]?",end:"[%-]?%>",subLanguage:"ruby",excludeBegin:!0,excludeEnd:!0, +relevance:0},{className:"type",begin:"!\\w+!"+t},{className:"type", +begin:"!<"+t+">"},{className:"type",begin:"!"+t},{className:"type",begin:"!!"+t +},{className:"meta",begin:"&"+e.UNDERSCORE_IDENT_RE+"$"},{className:"meta", +begin:"\\*"+e.UNDERSCORE_IDENT_RE+"$"},{className:"bullet",begin:"-(?=[ ]|$)", +relevance:0},e.HASH_COMMENT_MODE,{beginKeywords:n,keywords:{literal:n}},{ +className:"number", +begin:"\\b[0-9]{4}(-[0-9][0-9]){0,2}([Tt \\t][0-9][0-9]?(:[0-9][0-9]){2})?(\\.[0-9]*)?([ \\t])*(Z|[-+][0-9][0-9]?(:[0-9][0-9])?)?\\b" +},{className:"number",begin:e.C_NUMBER_RE+"\\b",relevance:0},s,o,a],c=[...l] +;return c.pop(),c.push(i),r.contains=c,{name:"YAML",case_insensitive:!0, +aliases:["yml"],contains:l}}});const qe=ae;for(const e of Object.keys(He)){ +const 
n=e.replace("grmr_","").replace("_","-");qe.registerLanguage(n,He[e])} +return qe}() +;"object"==typeof exports&&"undefined"!=typeof module&&(module.exports=hljs); \ No newline at end of file diff --git a/js/html5shiv.min.js b/js/html5shiv.min.js new file mode 100644 index 00000000..1a01c94b --- /dev/null +++ b/js/html5shiv.min.js @@ -0,0 +1,4 @@ +/** +* @preserve HTML5 Shiv 3.7.3 | @afarkas @jdalton @jon_neal @rem | MIT/GPL2 Licensed +*/ +!function(a,b){function c(a,b){var c=a.createElement("p"),d=a.getElementsByTagName("head")[0]||a.documentElement;return c.innerHTML="x",d.insertBefore(c.lastChild,d.firstChild)}function d(){var a=t.elements;return"string"==typeof a?a.split(" "):a}function e(a,b){var c=t.elements;"string"!=typeof c&&(c=c.join(" ")),"string"!=typeof a&&(a=a.join(" ")),t.elements=c+" "+a,j(b)}function f(a){var b=s[a[q]];return b||(b={},r++,a[q]=r,s[r]=b),b}function g(a,c,d){if(c||(c=b),l)return c.createElement(a);d||(d=f(c));var e;return e=d.cache[a]?d.cache[a].cloneNode():p.test(a)?(d.cache[a]=d.createElem(a)).cloneNode():d.createElem(a),!e.canHaveChildren||o.test(a)||e.tagUrn?e:d.frag.appendChild(e)}function h(a,c){if(a||(a=b),l)return a.createDocumentFragment();c=c||f(a);for(var e=c.frag.cloneNode(),g=0,h=d(),i=h.length;i>g;g++)e.createElement(h[g]);return e}function i(a,b){b.cache||(b.cache={},b.createElem=a.createElement,b.createFrag=a.createDocumentFragment,b.frag=b.createFrag()),a.createElement=function(c){return t.shivMethods?g(c,a,b):b.createElem(c)},a.createDocumentFragment=Function("h,f","return function(){var n=f.cloneNode(),c=n.createElement;h.shivMethods&&("+d().join().replace(/[\w\-:]+/g,function(a){return b.createElem(a),b.frag.createElement(a),'c("'+a+'")'})+");return n}")(t,b.frag)}function j(a){a||(a=b);var d=f(a);return!t.shivCSS||k||d.hasCSS||(d.hasCSS=!!c(a,"article,aside,dialog,figcaption,figure,footer,header,hgroup,main,nav,section{display:block}mark{background:#FF0;color:#000}template{display:none}")),l||i(a,d),a}var k,l,m="3.7.3",n=a.html5||{},o=/^<|^(?:button|map|select|textarea|object|iframe|option|optgroup)$/i,p=/^(?:a|b|code|div|fieldset|h1|h2|h3|h4|h5|h6|i|label|li|ol|p|q|span|strong|style|table|tbody|td|th|tr|ul)$/i,q="_html5shiv",r=0,s={};!function(){try{var a=b.createElement("a");a.innerHTML="",k="hidden"in a,l=1==a.childNodes.length||function(){b.createElement("a");var a=b.createDocumentFragment();return"undefined"==typeof a.cloneNode||"undefined"==typeof a.createDocumentFragment||"undefined"==typeof a.createElement}()}catch(c){k=!0,l=!0}}();var t={elements:n.elements||"abbr article aside audio bdi canvas data datalist details dialog figcaption figure footer header hgroup main mark meter nav output picture progress section summary template time video",version:m,shivCSS:n.shivCSS!==!1,supportsUnknownElements:l,shivMethods:n.shivMethods!==!1,type:"default",shivDocument:j,createElement:g,createDocumentFragment:h,addElements:e};a.html5=t,j(b),"object"==typeof module&&module.exports&&(module.exports=t)}("undefined"!=typeof window?window:this,document); diff --git a/js/jquery-3.6.0.min.js b/js/jquery-3.6.0.min.js new file mode 100644 index 00000000..c4c6022f --- /dev/null +++ b/js/jquery-3.6.0.min.js @@ -0,0 +1,2 @@ +/*! 
jQuery v3.6.0 | (c) OpenJS Foundation and other contributors | jquery.org/license */ +!function(e,t){"use strict";"object"==typeof module&&"object"==typeof module.exports?module.exports=e.document?t(e,!0):function(e){if(!e.document)throw new Error("jQuery requires a window with a document");return t(e)}:t(e)}("undefined"!=typeof window?window:this,function(C,e){"use strict";var t=[],r=Object.getPrototypeOf,s=t.slice,g=t.flat?function(e){return t.flat.call(e)}:function(e){return t.concat.apply([],e)},u=t.push,i=t.indexOf,n={},o=n.toString,v=n.hasOwnProperty,a=v.toString,l=a.call(Object),y={},m=function(e){return"function"==typeof e&&"number"!=typeof e.nodeType&&"function"!=typeof e.item},x=function(e){return null!=e&&e===e.window},E=C.document,c={type:!0,src:!0,nonce:!0,noModule:!0};function b(e,t,n){var r,i,o=(n=n||E).createElement("script");if(o.text=e,t)for(r in c)(i=t[r]||t.getAttribute&&t.getAttribute(r))&&o.setAttribute(r,i);n.head.appendChild(o).parentNode.removeChild(o)}function w(e){return null==e?e+"":"object"==typeof e||"function"==typeof e?n[o.call(e)]||"object":typeof e}var f="3.6.0",S=function(e,t){return new S.fn.init(e,t)};function p(e){var t=!!e&&"length"in e&&e.length,n=w(e);return!m(e)&&!x(e)&&("array"===n||0===t||"number"==typeof t&&0+~]|"+M+")"+M+"*"),U=new RegExp(M+"|>"),X=new RegExp(F),V=new RegExp("^"+I+"$"),G={ID:new RegExp("^#("+I+")"),CLASS:new RegExp("^\\.("+I+")"),TAG:new RegExp("^("+I+"|[*])"),ATTR:new RegExp("^"+W),PSEUDO:new RegExp("^"+F),CHILD:new RegExp("^:(only|first|last|nth|nth-last)-(child|of-type)(?:\\("+M+"*(even|odd|(([+-]|)(\\d*)n|)"+M+"*(?:([+-]|)"+M+"*(\\d+)|))"+M+"*\\)|)","i"),bool:new RegExp("^(?:"+R+")$","i"),needsContext:new RegExp("^"+M+"*[>+~]|:(even|odd|eq|gt|lt|nth|first|last)(?:\\("+M+"*((?:-\\d)?\\d*)"+M+"*\\)|)(?=[^-]|$)","i")},Y=/HTML$/i,Q=/^(?:input|select|textarea|button)$/i,J=/^h\d$/i,K=/^[^{]+\{\s*\[native \w/,Z=/^(?:#([\w-]+)|(\w+)|\.([\w-]+))$/,ee=/[+~]/,te=new RegExp("\\\\[\\da-fA-F]{1,6}"+M+"?|\\\\([^\\r\\n\\f])","g"),ne=function(e,t){var n="0x"+e.slice(1)-65536;return t||(n<0?String.fromCharCode(n+65536):String.fromCharCode(n>>10|55296,1023&n|56320))},re=/([\0-\x1f\x7f]|^-?\d)|^-$|[^\0-\x1f\x7f-\uFFFF\w-]/g,ie=function(e,t){return t?"\0"===e?"\ufffd":e.slice(0,-1)+"\\"+e.charCodeAt(e.length-1).toString(16)+" ":"\\"+e},oe=function(){T()},ae=be(function(e){return!0===e.disabled&&"fieldset"===e.nodeName.toLowerCase()},{dir:"parentNode",next:"legend"});try{H.apply(t=O.call(p.childNodes),p.childNodes),t[p.childNodes.length].nodeType}catch(e){H={apply:t.length?function(e,t){L.apply(e,O.call(t))}:function(e,t){var n=e.length,r=0;while(e[n++]=t[r++]);e.length=n-1}}}function se(t,e,n,r){var i,o,a,s,u,l,c,f=e&&e.ownerDocument,p=e?e.nodeType:9;if(n=n||[],"string"!=typeof t||!t||1!==p&&9!==p&&11!==p)return n;if(!r&&(T(e),e=e||C,E)){if(11!==p&&(u=Z.exec(t)))if(i=u[1]){if(9===p){if(!(a=e.getElementById(i)))return n;if(a.id===i)return n.push(a),n}else if(f&&(a=f.getElementById(i))&&y(e,a)&&a.id===i)return n.push(a),n}else{if(u[2])return H.apply(n,e.getElementsByTagName(t)),n;if((i=u[3])&&d.getElementsByClassName&&e.getElementsByClassName)return H.apply(n,e.getElementsByClassName(i)),n}if(d.qsa&&!N[t+" "]&&(!v||!v.test(t))&&(1!==p||"object"!==e.nodeName.toLowerCase())){if(c=t,f=e,1===p&&(U.test(t)||z.test(t))){(f=ee.test(t)&&ye(e.parentNode)||e)===e&&d.scope||((s=e.getAttribute("id"))?s=s.replace(re,ie):e.setAttribute("id",s=S)),o=(l=h(t)).length;while(o--)l[o]=(s?"#"+s:":scope")+" "+xe(l[o]);c=l.join(",")}try{return 
H.apply(n,f.querySelectorAll(c)),n}catch(e){N(t,!0)}finally{s===S&&e.removeAttribute("id")}}}return g(t.replace($,"$1"),e,n,r)}function ue(){var r=[];return function e(t,n){return r.push(t+" ")>b.cacheLength&&delete e[r.shift()],e[t+" "]=n}}function le(e){return e[S]=!0,e}function ce(e){var t=C.createElement("fieldset");try{return!!e(t)}catch(e){return!1}finally{t.parentNode&&t.parentNode.removeChild(t),t=null}}function fe(e,t){var n=e.split("|"),r=n.length;while(r--)b.attrHandle[n[r]]=t}function pe(e,t){var n=t&&e,r=n&&1===e.nodeType&&1===t.nodeType&&e.sourceIndex-t.sourceIndex;if(r)return r;if(n)while(n=n.nextSibling)if(n===t)return-1;return e?1:-1}function de(t){return function(e){return"input"===e.nodeName.toLowerCase()&&e.type===t}}function he(n){return function(e){var t=e.nodeName.toLowerCase();return("input"===t||"button"===t)&&e.type===n}}function ge(t){return function(e){return"form"in e?e.parentNode&&!1===e.disabled?"label"in e?"label"in e.parentNode?e.parentNode.disabled===t:e.disabled===t:e.isDisabled===t||e.isDisabled!==!t&&ae(e)===t:e.disabled===t:"label"in e&&e.disabled===t}}function ve(a){return le(function(o){return o=+o,le(function(e,t){var n,r=a([],e.length,o),i=r.length;while(i--)e[n=r[i]]&&(e[n]=!(t[n]=e[n]))})})}function ye(e){return e&&"undefined"!=typeof e.getElementsByTagName&&e}for(e in d=se.support={},i=se.isXML=function(e){var t=e&&e.namespaceURI,n=e&&(e.ownerDocument||e).documentElement;return!Y.test(t||n&&n.nodeName||"HTML")},T=se.setDocument=function(e){var t,n,r=e?e.ownerDocument||e:p;return r!=C&&9===r.nodeType&&r.documentElement&&(a=(C=r).documentElement,E=!i(C),p!=C&&(n=C.defaultView)&&n.top!==n&&(n.addEventListener?n.addEventListener("unload",oe,!1):n.attachEvent&&n.attachEvent("onunload",oe)),d.scope=ce(function(e){return a.appendChild(e).appendChild(C.createElement("div")),"undefined"!=typeof e.querySelectorAll&&!e.querySelectorAll(":scope fieldset div").length}),d.attributes=ce(function(e){return e.className="i",!e.getAttribute("className")}),d.getElementsByTagName=ce(function(e){return e.appendChild(C.createComment("")),!e.getElementsByTagName("*").length}),d.getElementsByClassName=K.test(C.getElementsByClassName),d.getById=ce(function(e){return a.appendChild(e).id=S,!C.getElementsByName||!C.getElementsByName(S).length}),d.getById?(b.filter.ID=function(e){var t=e.replace(te,ne);return function(e){return e.getAttribute("id")===t}},b.find.ID=function(e,t){if("undefined"!=typeof t.getElementById&&E){var n=t.getElementById(e);return n?[n]:[]}}):(b.filter.ID=function(e){var n=e.replace(te,ne);return function(e){var t="undefined"!=typeof e.getAttributeNode&&e.getAttributeNode("id");return t&&t.value===n}},b.find.ID=function(e,t){if("undefined"!=typeof t.getElementById&&E){var n,r,i,o=t.getElementById(e);if(o){if((n=o.getAttributeNode("id"))&&n.value===e)return[o];i=t.getElementsByName(e),r=0;while(o=i[r++])if((n=o.getAttributeNode("id"))&&n.value===e)return[o]}return[]}}),b.find.TAG=d.getElementsByTagName?function(e,t){return"undefined"!=typeof t.getElementsByTagName?t.getElementsByTagName(e):d.qsa?t.querySelectorAll(e):void 0}:function(e,t){var n,r=[],i=0,o=t.getElementsByTagName(e);if("*"===e){while(n=o[i++])1===n.nodeType&&r.push(n);return r}return o},b.find.CLASS=d.getElementsByClassName&&function(e,t){if("undefined"!=typeof t.getElementsByClassName&&E)return t.getElementsByClassName(e)},s=[],v=[],(d.qsa=K.test(C.querySelectorAll))&&(ce(function(e){var 
t;a.appendChild(e).innerHTML="",e.querySelectorAll("[msallowcapture^='']").length&&v.push("[*^$]="+M+"*(?:''|\"\")"),e.querySelectorAll("[selected]").length||v.push("\\["+M+"*(?:value|"+R+")"),e.querySelectorAll("[id~="+S+"-]").length||v.push("~="),(t=C.createElement("input")).setAttribute("name",""),e.appendChild(t),e.querySelectorAll("[name='']").length||v.push("\\["+M+"*name"+M+"*="+M+"*(?:''|\"\")"),e.querySelectorAll(":checked").length||v.push(":checked"),e.querySelectorAll("a#"+S+"+*").length||v.push(".#.+[+~]"),e.querySelectorAll("\\\f"),v.push("[\\r\\n\\f]")}),ce(function(e){e.innerHTML="";var t=C.createElement("input");t.setAttribute("type","hidden"),e.appendChild(t).setAttribute("name","D"),e.querySelectorAll("[name=d]").length&&v.push("name"+M+"*[*^$|!~]?="),2!==e.querySelectorAll(":enabled").length&&v.push(":enabled",":disabled"),a.appendChild(e).disabled=!0,2!==e.querySelectorAll(":disabled").length&&v.push(":enabled",":disabled"),e.querySelectorAll("*,:x"),v.push(",.*:")})),(d.matchesSelector=K.test(c=a.matches||a.webkitMatchesSelector||a.mozMatchesSelector||a.oMatchesSelector||a.msMatchesSelector))&&ce(function(e){d.disconnectedMatch=c.call(e,"*"),c.call(e,"[s!='']:x"),s.push("!=",F)}),v=v.length&&new RegExp(v.join("|")),s=s.length&&new RegExp(s.join("|")),t=K.test(a.compareDocumentPosition),y=t||K.test(a.contains)?function(e,t){var n=9===e.nodeType?e.documentElement:e,r=t&&t.parentNode;return e===r||!(!r||1!==r.nodeType||!(n.contains?n.contains(r):e.compareDocumentPosition&&16&e.compareDocumentPosition(r)))}:function(e,t){if(t)while(t=t.parentNode)if(t===e)return!0;return!1},j=t?function(e,t){if(e===t)return l=!0,0;var n=!e.compareDocumentPosition-!t.compareDocumentPosition;return n||(1&(n=(e.ownerDocument||e)==(t.ownerDocument||t)?e.compareDocumentPosition(t):1)||!d.sortDetached&&t.compareDocumentPosition(e)===n?e==C||e.ownerDocument==p&&y(p,e)?-1:t==C||t.ownerDocument==p&&y(p,t)?1:u?P(u,e)-P(u,t):0:4&n?-1:1)}:function(e,t){if(e===t)return l=!0,0;var n,r=0,i=e.parentNode,o=t.parentNode,a=[e],s=[t];if(!i||!o)return e==C?-1:t==C?1:i?-1:o?1:u?P(u,e)-P(u,t):0;if(i===o)return pe(e,t);n=e;while(n=n.parentNode)a.unshift(n);n=t;while(n=n.parentNode)s.unshift(n);while(a[r]===s[r])r++;return r?pe(a[r],s[r]):a[r]==p?-1:s[r]==p?1:0}),C},se.matches=function(e,t){return se(e,null,null,t)},se.matchesSelector=function(e,t){if(T(e),d.matchesSelector&&E&&!N[t+" "]&&(!s||!s.test(t))&&(!v||!v.test(t)))try{var n=c.call(e,t);if(n||d.disconnectedMatch||e.document&&11!==e.document.nodeType)return n}catch(e){N(t,!0)}return 0":{dir:"parentNode",first:!0}," ":{dir:"parentNode"},"+":{dir:"previousSibling",first:!0},"~":{dir:"previousSibling"}},preFilter:{ATTR:function(e){return e[1]=e[1].replace(te,ne),e[3]=(e[3]||e[4]||e[5]||"").replace(te,ne),"~="===e[2]&&(e[3]=" "+e[3]+" "),e.slice(0,4)},CHILD:function(e){return e[1]=e[1].toLowerCase(),"nth"===e[1].slice(0,3)?(e[3]||se.error(e[0]),e[4]=+(e[4]?e[5]+(e[6]||1):2*("even"===e[3]||"odd"===e[3])),e[5]=+(e[7]+e[8]||"odd"===e[3])):e[3]&&se.error(e[0]),e},PSEUDO:function(e){var t,n=!e[6]&&e[2];return G.CHILD.test(e[0])?null:(e[3]?e[2]=e[4]||e[5]||"":n&&X.test(n)&&(t=h(n,!0))&&(t=n.indexOf(")",n.length-t)-n.length)&&(e[0]=e[0].slice(0,t),e[2]=n.slice(0,t)),e.slice(0,3))}},filter:{TAG:function(e){var t=e.replace(te,ne).toLowerCase();return"*"===e?function(){return!0}:function(e){return e.nodeName&&e.nodeName.toLowerCase()===t}},CLASS:function(e){var t=m[e+" "];return t||(t=new RegExp("(^|"+M+")"+e+"("+M+"|$)"))&&m(e,function(e){return t.test("string"==typeof 
e.className&&e.className||"undefined"!=typeof e.getAttribute&&e.getAttribute("class")||"")})},ATTR:function(n,r,i){return function(e){var t=se.attr(e,n);return null==t?"!="===r:!r||(t+="","="===r?t===i:"!="===r?t!==i:"^="===r?i&&0===t.indexOf(i):"*="===r?i&&-1:\x20\t\r\n\f]*)[\x20\t\r\n\f]*\/?>(?:<\/\1>|)$/i;function j(e,n,r){return m(n)?S.grep(e,function(e,t){return!!n.call(e,t,e)!==r}):n.nodeType?S.grep(e,function(e){return e===n!==r}):"string"!=typeof n?S.grep(e,function(e){return-1)[^>]*|#([\w-]+))$/;(S.fn.init=function(e,t,n){var r,i;if(!e)return this;if(n=n||D,"string"==typeof e){if(!(r="<"===e[0]&&">"===e[e.length-1]&&3<=e.length?[null,e,null]:q.exec(e))||!r[1]&&t)return!t||t.jquery?(t||n).find(e):this.constructor(t).find(e);if(r[1]){if(t=t instanceof S?t[0]:t,S.merge(this,S.parseHTML(r[1],t&&t.nodeType?t.ownerDocument||t:E,!0)),N.test(r[1])&&S.isPlainObject(t))for(r in t)m(this[r])?this[r](t[r]):this.attr(r,t[r]);return this}return(i=E.getElementById(r[2]))&&(this[0]=i,this.length=1),this}return e.nodeType?(this[0]=e,this.length=1,this):m(e)?void 0!==n.ready?n.ready(e):e(S):S.makeArray(e,this)}).prototype=S.fn,D=S(E);var L=/^(?:parents|prev(?:Until|All))/,H={children:!0,contents:!0,next:!0,prev:!0};function O(e,t){while((e=e[t])&&1!==e.nodeType);return e}S.fn.extend({has:function(e){var t=S(e,this),n=t.length;return this.filter(function(){for(var e=0;e\x20\t\r\n\f]*)/i,he=/^$|^module$|\/(?:java|ecma)script/i;ce=E.createDocumentFragment().appendChild(E.createElement("div")),(fe=E.createElement("input")).setAttribute("type","radio"),fe.setAttribute("checked","checked"),fe.setAttribute("name","t"),ce.appendChild(fe),y.checkClone=ce.cloneNode(!0).cloneNode(!0).lastChild.checked,ce.innerHTML="",y.noCloneChecked=!!ce.cloneNode(!0).lastChild.defaultValue,ce.innerHTML="",y.option=!!ce.lastChild;var ge={thead:[1,"","
"],col:[2,"","
"],tr:[2,"","
"],td:[3,"","
"],_default:[0,"",""]};function ve(e,t){var n;return n="undefined"!=typeof e.getElementsByTagName?e.getElementsByTagName(t||"*"):"undefined"!=typeof e.querySelectorAll?e.querySelectorAll(t||"*"):[],void 0===t||t&&A(e,t)?S.merge([e],n):n}function ye(e,t){for(var n=0,r=e.length;n",""]);var me=/<|&#?\w+;/;function xe(e,t,n,r,i){for(var o,a,s,u,l,c,f=t.createDocumentFragment(),p=[],d=0,h=e.length;d\s*$/g;function je(e,t){return A(e,"table")&&A(11!==t.nodeType?t:t.firstChild,"tr")&&S(e).children("tbody")[0]||e}function De(e){return e.type=(null!==e.getAttribute("type"))+"/"+e.type,e}function qe(e){return"true/"===(e.type||"").slice(0,5)?e.type=e.type.slice(5):e.removeAttribute("type"),e}function Le(e,t){var n,r,i,o,a,s;if(1===t.nodeType){if(Y.hasData(e)&&(s=Y.get(e).events))for(i in Y.remove(t,"handle events"),s)for(n=0,r=s[i].length;n").attr(n.scriptAttrs||{}).prop({charset:n.scriptCharset,src:n.url}).on("load error",i=function(e){r.remove(),i=null,e&&t("error"===e.type?404:200,e.type)}),E.head.appendChild(r[0])},abort:function(){i&&i()}}});var _t,zt=[],Ut=/(=)\?(?=&|$)|\?\?/;S.ajaxSetup({jsonp:"callback",jsonpCallback:function(){var e=zt.pop()||S.expando+"_"+wt.guid++;return this[e]=!0,e}}),S.ajaxPrefilter("json jsonp",function(e,t,n){var r,i,o,a=!1!==e.jsonp&&(Ut.test(e.url)?"url":"string"==typeof e.data&&0===(e.contentType||"").indexOf("application/x-www-form-urlencoded")&&Ut.test(e.data)&&"data");if(a||"jsonp"===e.dataTypes[0])return r=e.jsonpCallback=m(e.jsonpCallback)?e.jsonpCallback():e.jsonpCallback,a?e[a]=e[a].replace(Ut,"$1"+r):!1!==e.jsonp&&(e.url+=(Tt.test(e.url)?"&":"?")+e.jsonp+"="+r),e.converters["script json"]=function(){return o||S.error(r+" was not called"),o[0]},e.dataTypes[0]="json",i=C[r],C[r]=function(){o=arguments},n.always(function(){void 0===i?S(C).removeProp(r):C[r]=i,e[r]&&(e.jsonpCallback=t.jsonpCallback,zt.push(r)),o&&m(i)&&i(o[0]),o=i=void 0}),"script"}),y.createHTMLDocument=((_t=E.implementation.createHTMLDocument("").body).innerHTML="
",2===_t.childNodes.length),S.parseHTML=function(e,t,n){return"string"!=typeof e?[]:("boolean"==typeof t&&(n=t,t=!1),t||(y.createHTMLDocument?((r=(t=E.implementation.createHTMLDocument("")).createElement("base")).href=E.location.href,t.head.appendChild(r)):t=E),o=!n&&[],(i=N.exec(e))?[t.createElement(i[1])]:(i=xe([e],t,o),o&&o.length&&S(o).remove(),S.merge([],i.childNodes)));var r,i,o},S.fn.load=function(e,t,n){var r,i,o,a=this,s=e.indexOf(" ");return-1").append(S.parseHTML(e)).find(r):e)}).always(n&&function(e,t){a.each(function(){n.apply(this,o||[e.responseText,t,e])})}),this},S.expr.pseudos.animated=function(t){return S.grep(S.timers,function(e){return t===e.elem}).length},S.offset={setOffset:function(e,t,n){var r,i,o,a,s,u,l=S.css(e,"position"),c=S(e),f={};"static"===l&&(e.style.position="relative"),s=c.offset(),o=S.css(e,"top"),u=S.css(e,"left"),("absolute"===l||"fixed"===l)&&-1<(o+u).indexOf("auto")?(a=(r=c.position()).top,i=r.left):(a=parseFloat(o)||0,i=parseFloat(u)||0),m(t)&&(t=t.call(e,n,S.extend({},s))),null!=t.top&&(f.top=t.top-s.top+a),null!=t.left&&(f.left=t.left-s.left+i),"using"in t?t.using.call(e,f):c.css(f)}},S.fn.extend({offset:function(t){if(arguments.length)return void 0===t?this:this.each(function(e){S.offset.setOffset(this,t,e)});var e,n,r=this[0];return r?r.getClientRects().length?(e=r.getBoundingClientRect(),n=r.ownerDocument.defaultView,{top:e.top+n.pageYOffset,left:e.left+n.pageXOffset}):{top:0,left:0}:void 0},position:function(){if(this[0]){var e,t,n,r=this[0],i={top:0,left:0};if("fixed"===S.css(r,"position"))t=r.getBoundingClientRect();else{t=this.offset(),n=r.ownerDocument,e=r.offsetParent||n.documentElement;while(e&&(e===n.body||e===n.documentElement)&&"static"===S.css(e,"position"))e=e.parentNode;e&&e!==r&&1===e.nodeType&&((i=S(e).offset()).top+=S.css(e,"borderTopWidth",!0),i.left+=S.css(e,"borderLeftWidth",!0))}return{top:t.top-i.top-S.css(r,"marginTop",!0),left:t.left-i.left-S.css(r,"marginLeft",!0)}}},offsetParent:function(){return this.map(function(){var e=this.offsetParent;while(e&&"static"===S.css(e,"position"))e=e.offsetParent;return e||re})}}),S.each({scrollLeft:"pageXOffset",scrollTop:"pageYOffset"},function(t,i){var o="pageYOffset"===i;S.fn[t]=function(e){return $(this,function(e,t,n){var r;if(x(e)?r=e:9===e.nodeType&&(r=e.defaultView),void 0===n)return r?r[i]:e[t];r?r.scrollTo(o?r.pageXOffset:n,o?n:r.pageYOffset):e[t]=n},t,e,arguments.length)}}),S.each(["top","left"],function(e,n){S.cssHooks[n]=Fe(y.pixelPosition,function(e,t){if(t)return t=We(e,n),Pe.test(t)?S(e).position()[n]+"px":t})}),S.each({Height:"height",Width:"width"},function(a,s){S.each({padding:"inner"+a,content:s,"":"outer"+a},function(r,o){S.fn[o]=function(e,t){var n=arguments.length&&(r||"boolean"!=typeof e),i=r||(!0===e||!0===t?"margin":"border");return $(this,function(e,t,n){var r;return x(e)?0===o.indexOf("outer")?e["inner"+a]:e.document.documentElement["client"+a]:9===e.nodeType?(r=e.documentElement,Math.max(e.body["scroll"+a],r["scroll"+a],e.body["offset"+a],r["offset"+a],r["client"+a])):void 0===n?S.css(e,t,i):S.style(e,t,n,i)},s,n?e:void 0,n)}})}),S.each(["ajaxStart","ajaxStop","ajaxComplete","ajaxError","ajaxSuccess","ajaxSend"],function(e,t){S.fn[t]=function(e){return this.on(t,e)}}),S.fn.extend({bind:function(e,t,n){return this.on(e,null,t,n)},unbind:function(e,t){return this.off(e,null,t)},delegate:function(e,t,n,r){return this.on(t,e,n,r)},undelegate:function(e,t,n){return 1===arguments.length?this.off(e,"**"):this.off(t,e||"**",n)},hover:function(e,t){return 
this.mouseenter(e).mouseleave(t||e)}}),S.each("blur focus focusin focusout resize scroll click dblclick mousedown mouseup mousemove mouseover mouseout mouseenter mouseleave change select submit keydown keypress keyup contextmenu".split(" "),function(e,n){S.fn[n]=function(e,t){return 0"),n("table.docutils.footnote").wrap("
"),n("table.docutils.citation").wrap("
"),n(".wy-menu-vertical ul").not(".simple").siblings("a").each((function(){var t=n(this);expand=n(''),expand.on("click",(function(n){return e.toggleCurrent(t),n.stopPropagation(),!1})),t.prepend(expand)}))},reset:function(){var n=encodeURI(window.location.hash)||"#";try{var e=$(".wy-menu-vertical"),t=e.find('[href="'+n+'"]');if(0===t.length){var i=$('.document [id="'+n.substring(1)+'"]').closest("div.section");0===(t=e.find('[href="#'+i.attr("id")+'"]')).length&&(t=e.find('[href="#"]'))}if(t.length>0){$(".wy-menu-vertical .current").removeClass("current").attr("aria-expanded","false"),t.addClass("current").attr("aria-expanded","true"),t.closest("li.toctree-l1").parent().addClass("current").attr("aria-expanded","true");for(let n=1;n<=10;n++)t.closest("li.toctree-l"+n).addClass("current").attr("aria-expanded","true");t[0].scrollIntoView()}}catch(n){console.log("Error expanding nav for anchor",n)}},onScroll:function(){this.winScroll=!1;var n=this.win.scrollTop(),e=n+this.winHeight,t=this.navBar.scrollTop()+(n-this.winPosition);n<0||e>this.docHeight||(this.navBar.scrollTop(t),this.winPosition=n)},onResize:function(){this.winResize=!1,this.winHeight=this.win.height(),this.docHeight=$(document).height()},hashChange:function(){this.linkScroll=!0,this.win.one("hashchange",(function(){this.linkScroll=!1}))},toggleCurrent:function(n){var e=n.closest("li");e.siblings("li.current").removeClass("current").attr("aria-expanded","false"),e.siblings().find("li.current").removeClass("current").attr("aria-expanded","false");var t=e.find("> ul li");t.length&&(t.removeClass("current").attr("aria-expanded","false"),e.toggleClass("current").attr("aria-expanded",(function(n,e){return"true"==e?"false":"true"})))}},"undefined"!=typeof window&&(window.SphinxRtdTheme={Navigation:n.exports.ThemeNav,StickyNav:n.exports.ThemeNav}),function(){for(var n=0,e=["ms","moz","webkit","o"],t=0;t { + hljs.highlightAll() + Array.from(document.querySelectorAll('.toctree-l1 a')) + .filter(el => customUrlMappingLevel1.has(el.innerText)) + .map(el => el.href = prefix + customUrlMappingLevel1.get(el.innerText)) + + Array.from(document.querySelectorAll('.toctree-l2 a')) + .filter(el => customUrlMappingLevel2.has(el.innerText) && el.href.endsWith('#')) + .map(el => el.href = prefix + customUrlMappingLevel2.get(el.innerText)) + + if (window.location.pathname.endsWith('search.html')) { + const li = document.createElement('li') + li.innerHTML = document.querySelector('h1').innerHTML + document.querySelector('.wy-breadcrumbs') + .appendChild(li) + + // document.querySelectorAll('#mkdocs-search-results article h3 a') + // .forEach(el => el.innerText = 'aaa' ) + } + // activate hamburger menu: + document.querySelector('[data-toggle="wy-nav-top"]').addEventListener('click', (e) => { + document.querySelectorAll('[data-toggle="wy-nav-shift"]') + .forEach(el => el.classList.toggle('shift')) + }) + + const activeTOClink = () => { + const el = Array.from(document.querySelectorAll('h1, h2, h3, h4, h5')) + .filter(heading => heading.getBoundingClientRect().y <= 40).pop() + if (el) { + Array.from(document.querySelectorAll('div.toc a')).map(a => a.classList.remove('active')) + document.querySelector(`div.toc a[href="#${el.id}"]`)?.classList.add('active') + } + } + + if ('onscrollend' in window) { + document.addEventListener('scrollend', activeTOClink) + } else { + document.addEventListener('scroll', () => { + clearTimeout(window.scrollEndTimer) + window.scrollEndTimer = setTimeout(activeTOClink, 100) + }) + } + +} diff --git a/search.html b/search.html 
new file mode 100644 index 00000000..4c15a9bd --- /dev/null +++ b/search.html @@ -0,0 +1,315 @@ + + + + + + + + Triply Documentation + + + + + + + + + +
+ + +
+ +
+
+
    +
  • +
  • +
  • +
+
+
+
+
+ + +

Search Results

+ + + +
+ Searching... +
+ + +
+
+ +
+ +
+ +
+
+ +
+
+ +
+ +
+ +
+ + + + + +
+ + + + + + + diff --git a/search/lunr.js b/search/lunr.js new file mode 100644 index 00000000..aca0a167 --- /dev/null +++ b/search/lunr.js @@ -0,0 +1,3475 @@ +/** + * lunr - http://lunrjs.com - A bit like Solr, but much smaller and not as bright - 2.3.9 + * Copyright (C) 2020 Oliver Nightingale + * @license MIT + */ + +;(function(){ + +/** + * A convenience function for configuring and constructing + * a new lunr Index. + * + * A lunr.Builder instance is created and the pipeline setup + * with a trimmer, stop word filter and stemmer. + * + * This builder object is yielded to the configuration function + * that is passed as a parameter, allowing the list of fields + * and other builder parameters to be customised. + * + * All documents _must_ be added within the passed config function. + * + * @example + * var idx = lunr(function () { + * this.field('title') + * this.field('body') + * this.ref('id') + * + * documents.forEach(function (doc) { + * this.add(doc) + * }, this) + * }) + * + * @see {@link lunr.Builder} + * @see {@link lunr.Pipeline} + * @see {@link lunr.trimmer} + * @see {@link lunr.stopWordFilter} + * @see {@link lunr.stemmer} + * @namespace {function} lunr + */ +var lunr = function (config) { + var builder = new lunr.Builder + + builder.pipeline.add( + lunr.trimmer, + lunr.stopWordFilter, + lunr.stemmer + ) + + builder.searchPipeline.add( + lunr.stemmer + ) + + config.call(builder, builder) + return builder.build() +} + +lunr.version = "2.3.9" +/*! + * lunr.utils + * Copyright (C) 2020 Oliver Nightingale + */ + +/** + * A namespace containing utils for the rest of the lunr library + * @namespace lunr.utils + */ +lunr.utils = {} + +/** + * Print a warning message to the console. + * + * @param {String} message The message to be printed. + * @memberOf lunr.utils + * @function + */ +lunr.utils.warn = (function (global) { + /* eslint-disable no-console */ + return function (message) { + if (global.console && console.warn) { + console.warn(message) + } + } + /* eslint-enable no-console */ +})(this) + +/** + * Convert an object to a string. + * + * In the case of `null` and `undefined` the function returns + * the empty string, in all other cases the result of calling + * `toString` on the passed object is returned. + * + * @param {Any} obj The object to convert to a string. + * @return {String} string representation of the passed object. + * @memberOf lunr.utils + */ +lunr.utils.asString = function (obj) { + if (obj === void 0 || obj === null) { + return "" + } else { + return obj.toString() + } +} + +/** + * Clones an object. + * + * Will create a copy of an existing object such that any mutations + * on the copy cannot affect the original. + * + * Only shallow objects are supported, passing a nested object to this + * function will cause a TypeError. + * + * Objects with primitives, and arrays of primitives are supported. + * + * @param {Object} obj The object to clone. + * @return {Object} a clone of the passed object. + * @throws {TypeError} when a nested object is passed. 
+ * @memberOf Utils + */ +lunr.utils.clone = function (obj) { + if (obj === null || obj === undefined) { + return obj + } + + var clone = Object.create(null), + keys = Object.keys(obj) + + for (var i = 0; i < keys.length; i++) { + var key = keys[i], + val = obj[key] + + if (Array.isArray(val)) { + clone[key] = val.slice() + continue + } + + if (typeof val === 'string' || + typeof val === 'number' || + typeof val === 'boolean') { + clone[key] = val + continue + } + + throw new TypeError("clone is not deep and does not support nested objects") + } + + return clone +} +lunr.FieldRef = function (docRef, fieldName, stringValue) { + this.docRef = docRef + this.fieldName = fieldName + this._stringValue = stringValue +} + +lunr.FieldRef.joiner = "/" + +lunr.FieldRef.fromString = function (s) { + var n = s.indexOf(lunr.FieldRef.joiner) + + if (n === -1) { + throw "malformed field ref string" + } + + var fieldRef = s.slice(0, n), + docRef = s.slice(n + 1) + + return new lunr.FieldRef (docRef, fieldRef, s) +} + +lunr.FieldRef.prototype.toString = function () { + if (this._stringValue == undefined) { + this._stringValue = this.fieldName + lunr.FieldRef.joiner + this.docRef + } + + return this._stringValue +} +/*! + * lunr.Set + * Copyright (C) 2020 Oliver Nightingale + */ + +/** + * A lunr set. + * + * @constructor + */ +lunr.Set = function (elements) { + this.elements = Object.create(null) + + if (elements) { + this.length = elements.length + + for (var i = 0; i < this.length; i++) { + this.elements[elements[i]] = true + } + } else { + this.length = 0 + } +} + +/** + * A complete set that contains all elements. + * + * @static + * @readonly + * @type {lunr.Set} + */ +lunr.Set.complete = { + intersect: function (other) { + return other + }, + + union: function () { + return this + }, + + contains: function () { + return true + } +} + +/** + * An empty set that contains no elements. + * + * @static + * @readonly + * @type {lunr.Set} + */ +lunr.Set.empty = { + intersect: function () { + return this + }, + + union: function (other) { + return other + }, + + contains: function () { + return false + } +} + +/** + * Returns true if this set contains the specified object. + * + * @param {object} object - Object whose presence in this set is to be tested. + * @returns {boolean} - True if this set contains the specified object. + */ +lunr.Set.prototype.contains = function (object) { + return !!this.elements[object] +} + +/** + * Returns a new set containing only the elements that are present in both + * this set and the specified set. + * + * @param {lunr.Set} other - set to intersect with this set. + * @returns {lunr.Set} a new set that is the intersection of this and the specified set. + */ + +lunr.Set.prototype.intersect = function (other) { + var a, b, elements, intersection = [] + + if (other === lunr.Set.complete) { + return this + } + + if (other === lunr.Set.empty) { + return other + } + + if (this.length < other.length) { + a = this + b = other + } else { + a = other + b = this + } + + elements = Object.keys(a.elements) + + for (var i = 0; i < elements.length; i++) { + var element = elements[i] + if (element in b.elements) { + intersection.push(element) + } + } + + return new lunr.Set (intersection) +} + +/** + * Returns a new set combining the elements of this and the specified set. + * + * @param {lunr.Set} other - set to union with this set. + * @return {lunr.Set} a new set that is the union of this and the specified set. 
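+ * @example
+ * // Illustrative sketch with arbitrary values: union keeps every element of
+ * // both sets, while intersect (above) keeps only the shared ones.
+ * var a = new lunr.Set(['title', 'body'])
+ * var b = new lunr.Set(['body', 'ref'])
+ * a.union(b).contains('ref') // => true
+ * a.intersect(b).contains('title') // => false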
+ */ + +lunr.Set.prototype.union = function (other) { + if (other === lunr.Set.complete) { + return lunr.Set.complete + } + + if (other === lunr.Set.empty) { + return this + } + + return new lunr.Set(Object.keys(this.elements).concat(Object.keys(other.elements))) +} +/** + * A function to calculate the inverse document frequency for + * a posting. This is shared between the builder and the index + * + * @private + * @param {object} posting - The posting for a given term + * @param {number} documentCount - The total number of documents. + */ +lunr.idf = function (posting, documentCount) { + var documentsWithTerm = 0 + + for (var fieldName in posting) { + if (fieldName == '_index') continue // Ignore the term index, its not a field + documentsWithTerm += Object.keys(posting[fieldName]).length + } + + var x = (documentCount - documentsWithTerm + 0.5) / (documentsWithTerm + 0.5) + + return Math.log(1 + Math.abs(x)) +} + +/** + * A token wraps a string representation of a token + * as it is passed through the text processing pipeline. + * + * @constructor + * @param {string} [str=''] - The string token being wrapped. + * @param {object} [metadata={}] - Metadata associated with this token. + */ +lunr.Token = function (str, metadata) { + this.str = str || "" + this.metadata = metadata || {} +} + +/** + * Returns the token string that is being wrapped by this object. + * + * @returns {string} + */ +lunr.Token.prototype.toString = function () { + return this.str +} + +/** + * A token update function is used when updating or optionally + * when cloning a token. + * + * @callback lunr.Token~updateFunction + * @param {string} str - The string representation of the token. + * @param {Object} metadata - All metadata associated with this token. + */ + +/** + * Applies the given function to the wrapped string token. + * + * @example + * token.update(function (str, metadata) { + * return str.toUpperCase() + * }) + * + * @param {lunr.Token~updateFunction} fn - A function to apply to the token string. + * @returns {lunr.Token} + */ +lunr.Token.prototype.update = function (fn) { + this.str = fn(this.str, this.metadata) + return this +} + +/** + * Creates a clone of this token. Optionally a function can be + * applied to the cloned token. + * + * @param {lunr.Token~updateFunction} [fn] - An optional function to apply to the cloned token. + * @returns {lunr.Token} + */ +lunr.Token.prototype.clone = function (fn) { + fn = fn || function (s) { return s } + return new lunr.Token (fn(this.str, this.metadata), this.metadata) +} +/*! + * lunr.tokenizer + * Copyright (C) 2020 Oliver Nightingale + */ + +/** + * A function for splitting a string into tokens ready to be inserted into + * the search index. Uses `lunr.tokenizer.separator` to split strings, change + * the value of this property to change how strings are split into tokens. + * + * This tokenizer will convert its parameter to a string by calling `toString` and + * then will split this string on the character in `lunr.tokenizer.separator`. + * Arrays will have their elements converted to strings and wrapped in a lunr.Token. + * + * Optional metadata can be passed to the tokenizer, this metadata will be cloned and + * added as metadata to every token that is created from the object to be tokenized. 
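+ * @example
+ * // Illustrative sketch: with the default separator (whitespace and hyphens)
+ * // the input is lowercased and split into three tokens.
+ * lunr.tokenizer('Foo-Bar baz').map(String) // => ['foo', 'bar', 'baz']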
+ * + * @static + * @param {?(string|object|object[])} obj - The object to convert into tokens + * @param {?object} metadata - Optional metadata to associate with every token + * @returns {lunr.Token[]} + * @see {@link lunr.Pipeline} + */ +lunr.tokenizer = function (obj, metadata) { + if (obj == null || obj == undefined) { + return [] + } + + if (Array.isArray(obj)) { + return obj.map(function (t) { + return new lunr.Token( + lunr.utils.asString(t).toLowerCase(), + lunr.utils.clone(metadata) + ) + }) + } + + var str = obj.toString().toLowerCase(), + len = str.length, + tokens = [] + + for (var sliceEnd = 0, sliceStart = 0; sliceEnd <= len; sliceEnd++) { + var char = str.charAt(sliceEnd), + sliceLength = sliceEnd - sliceStart + + if ((char.match(lunr.tokenizer.separator) || sliceEnd == len)) { + + if (sliceLength > 0) { + var tokenMetadata = lunr.utils.clone(metadata) || {} + tokenMetadata["position"] = [sliceStart, sliceLength] + tokenMetadata["index"] = tokens.length + + tokens.push( + new lunr.Token ( + str.slice(sliceStart, sliceEnd), + tokenMetadata + ) + ) + } + + sliceStart = sliceEnd + 1 + } + + } + + return tokens +} + +/** + * The separator used to split a string into tokens. Override this property to change the behaviour of + * `lunr.tokenizer` behaviour when tokenizing strings. By default this splits on whitespace and hyphens. + * + * @static + * @see lunr.tokenizer + */ +lunr.tokenizer.separator = /[\s\-]+/ +/*! + * lunr.Pipeline + * Copyright (C) 2020 Oliver Nightingale + */ + +/** + * lunr.Pipelines maintain an ordered list of functions to be applied to all + * tokens in documents entering the search index and queries being ran against + * the index. + * + * An instance of lunr.Index created with the lunr shortcut will contain a + * pipeline with a stop word filter and an English language stemmer. Extra + * functions can be added before or after either of these functions or these + * default functions can be removed. + * + * When run the pipeline will call each function in turn, passing a token, the + * index of that token in the original list of all tokens and finally a list of + * all the original tokens. + * + * The output of functions in the pipeline will be passed to the next function + * in the pipeline. To exclude a token from entering the index the function + * should return undefined, the rest of the pipeline will not be called with + * this token. + * + * For serialisation of pipelines to work, all functions used in an instance of + * a pipeline should be registered with lunr.Pipeline. Registered functions can + * then be loaded. If trying to load a serialised pipeline that uses functions + * that are not registered an error will be thrown. + * + * If not planning on serialising the pipeline then registering pipeline functions + * is not necessary. + * + * @constructor + */ +lunr.Pipeline = function () { + this._stack = [] +} + +lunr.Pipeline.registeredFunctions = Object.create(null) + +/** + * A pipeline function maps lunr.Token to lunr.Token. A lunr.Token contains the token + * string as well as all known metadata. A pipeline function can mutate the token string + * or mutate (or add) metadata for a given token. + * + * A pipeline function can indicate that the passed token should be discarded by returning + * null, undefined or an empty string. This token will not be passed to any downstream pipeline + * functions and will not be added to the index. + * + * Multiple tokens can be returned by returning an array of tokens. 
Each token will be passed + * to any downstream pipeline functions and all will returned tokens will be added to the index. + * + * Any number of pipeline functions may be chained together using a lunr.Pipeline. + * + * @interface lunr.PipelineFunction + * @param {lunr.Token} token - A token from the document being processed. + * @param {number} i - The index of this token in the complete list of tokens for this document/field. + * @param {lunr.Token[]} tokens - All tokens for this document/field. + * @returns {(?lunr.Token|lunr.Token[])} + */ + +/** + * Register a function with the pipeline. + * + * Functions that are used in the pipeline should be registered if the pipeline + * needs to be serialised, or a serialised pipeline needs to be loaded. + * + * Registering a function does not add it to a pipeline, functions must still be + * added to instances of the pipeline for them to be used when running a pipeline. + * + * @param {lunr.PipelineFunction} fn - The function to check for. + * @param {String} label - The label to register this function with + */ +lunr.Pipeline.registerFunction = function (fn, label) { + if (label in this.registeredFunctions) { + lunr.utils.warn('Overwriting existing registered function: ' + label) + } + + fn.label = label + lunr.Pipeline.registeredFunctions[fn.label] = fn +} + +/** + * Warns if the function is not registered as a Pipeline function. + * + * @param {lunr.PipelineFunction} fn - The function to check for. + * @private + */ +lunr.Pipeline.warnIfFunctionNotRegistered = function (fn) { + var isRegistered = fn.label && (fn.label in this.registeredFunctions) + + if (!isRegistered) { + lunr.utils.warn('Function is not registered with pipeline. This may cause problems when serialising the index.\n', fn) + } +} + +/** + * Loads a previously serialised pipeline. + * + * All functions to be loaded must already be registered with lunr.Pipeline. + * If any function from the serialised data has not been registered then an + * error will be thrown. + * + * @param {Object} serialised - The serialised pipeline to load. + * @returns {lunr.Pipeline} + */ +lunr.Pipeline.load = function (serialised) { + var pipeline = new lunr.Pipeline + + serialised.forEach(function (fnName) { + var fn = lunr.Pipeline.registeredFunctions[fnName] + + if (fn) { + pipeline.add(fn) + } else { + throw new Error('Cannot load unregistered function: ' + fnName) + } + }) + + return pipeline +} + +/** + * Adds new functions to the end of the pipeline. + * + * Logs a warning if the function has not been registered. + * + * @param {lunr.PipelineFunction[]} functions - Any number of functions to add to the pipeline. + */ +lunr.Pipeline.prototype.add = function () { + var fns = Array.prototype.slice.call(arguments) + + fns.forEach(function (fn) { + lunr.Pipeline.warnIfFunctionNotRegistered(fn) + this._stack.push(fn) + }, this) +} + +/** + * Adds a single function after a function that already exists in the + * pipeline. + * + * Logs a warning if the function has not been registered. + * + * @param {lunr.PipelineFunction} existingFn - A function that already exists in the pipeline. + * @param {lunr.PipelineFunction} newFn - The new function to add to the pipeline. 
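+ *
+ * The example below is a hedged sketch of positioning a custom function; `myTokenFilter`
+ * is a hypothetical, already registered pipeline function, and `builder` a lunr.Builder.
+ *
+ * @example <caption>Inserting a custom function after the stemmer (illustrative)</caption>
+ * // builder.pipeline.after(lunr.stemmer, myTokenFilter)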
+ */ +lunr.Pipeline.prototype.after = function (existingFn, newFn) { + lunr.Pipeline.warnIfFunctionNotRegistered(newFn) + + var pos = this._stack.indexOf(existingFn) + if (pos == -1) { + throw new Error('Cannot find existingFn') + } + + pos = pos + 1 + this._stack.splice(pos, 0, newFn) +} + +/** + * Adds a single function before a function that already exists in the + * pipeline. + * + * Logs a warning if the function has not been registered. + * + * @param {lunr.PipelineFunction} existingFn - A function that already exists in the pipeline. + * @param {lunr.PipelineFunction} newFn - The new function to add to the pipeline. + */ +lunr.Pipeline.prototype.before = function (existingFn, newFn) { + lunr.Pipeline.warnIfFunctionNotRegistered(newFn) + + var pos = this._stack.indexOf(existingFn) + if (pos == -1) { + throw new Error('Cannot find existingFn') + } + + this._stack.splice(pos, 0, newFn) +} + +/** + * Removes a function from the pipeline. + * + * @param {lunr.PipelineFunction} fn The function to remove from the pipeline. + */ +lunr.Pipeline.prototype.remove = function (fn) { + var pos = this._stack.indexOf(fn) + if (pos == -1) { + return + } + + this._stack.splice(pos, 1) +} + +/** + * Runs the current list of functions that make up the pipeline against the + * passed tokens. + * + * @param {Array} tokens The tokens to run through the pipeline. + * @returns {Array} + */ +lunr.Pipeline.prototype.run = function (tokens) { + var stackLength = this._stack.length + + for (var i = 0; i < stackLength; i++) { + var fn = this._stack[i] + var memo = [] + + for (var j = 0; j < tokens.length; j++) { + var result = fn(tokens[j], j, tokens) + + if (result === null || result === void 0 || result === '') continue + + if (Array.isArray(result)) { + for (var k = 0; k < result.length; k++) { + memo.push(result[k]) + } + } else { + memo.push(result) + } + } + + tokens = memo + } + + return tokens +} + +/** + * Convenience method for passing a string through a pipeline and getting + * strings out. This method takes care of wrapping the passed string in a + * token and mapping the resulting tokens back to strings. + * + * @param {string} str - The string to pass through the pipeline. + * @param {?object} metadata - Optional metadata to associate with the token + * passed to the pipeline. + * @returns {string[]} + */ +lunr.Pipeline.prototype.runString = function (str, metadata) { + var token = new lunr.Token (str, metadata) + + return this.run([token]).map(function (t) { + return t.toString() + }) +} + +/** + * Resets the pipeline by removing any existing processors. + * + */ +lunr.Pipeline.prototype.reset = function () { + this._stack = [] +} + +/** + * Returns a representation of the pipeline ready for serialisation. + * + * Logs a warning if the function has not been registered. + * + * @returns {Array} + */ +lunr.Pipeline.prototype.toJSON = function () { + return this._stack.map(function (fn) { + lunr.Pipeline.warnIfFunctionNotRegistered(fn) + + return fn.label + }) +} +/*! + * lunr.Vector + * Copyright (C) 2020 Oliver Nightingale + */ + +/** + * A vector is used to construct the vector space of documents and queries. These + * vectors support operations to determine the similarity between two documents or + * a document and a query. + * + * Normally no parameters are required for initializing a vector, but in the case of + * loading a previously dumped vector the raw elements can be provided to the constructor. 
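+ *
+ * For example (illustrative only), `new lunr.Vector([0, 0.5, 3, 1.2])` restores a
+ * vector holding the value 0.5 at index 0 and 1.2 at index 3, matching the flat
+ * layout described below.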
+ * + * For performance reasons vectors are implemented with a flat array, where an elements + * index is immediately followed by its value. E.g. [index, value, index, value]. This + * allows the underlying array to be as sparse as possible and still offer decent + * performance when being used for vector calculations. + * + * @constructor + * @param {Number[]} [elements] - The flat list of element index and element value pairs. + */ +lunr.Vector = function (elements) { + this._magnitude = 0 + this.elements = elements || [] +} + + +/** + * Calculates the position within the vector to insert a given index. + * + * This is used internally by insert and upsert. If there are duplicate indexes then + * the position is returned as if the value for that index were to be updated, but it + * is the callers responsibility to check whether there is a duplicate at that index + * + * @param {Number} insertIdx - The index at which the element should be inserted. + * @returns {Number} + */ +lunr.Vector.prototype.positionForIndex = function (index) { + // For an empty vector the tuple can be inserted at the beginning + if (this.elements.length == 0) { + return 0 + } + + var start = 0, + end = this.elements.length / 2, + sliceLength = end - start, + pivotPoint = Math.floor(sliceLength / 2), + pivotIndex = this.elements[pivotPoint * 2] + + while (sliceLength > 1) { + if (pivotIndex < index) { + start = pivotPoint + } + + if (pivotIndex > index) { + end = pivotPoint + } + + if (pivotIndex == index) { + break + } + + sliceLength = end - start + pivotPoint = start + Math.floor(sliceLength / 2) + pivotIndex = this.elements[pivotPoint * 2] + } + + if (pivotIndex == index) { + return pivotPoint * 2 + } + + if (pivotIndex > index) { + return pivotPoint * 2 + } + + if (pivotIndex < index) { + return (pivotPoint + 1) * 2 + } +} + +/** + * Inserts an element at an index within the vector. + * + * Does not allow duplicates, will throw an error if there is already an entry + * for this index. + * + * @param {Number} insertIdx - The index at which the element should be inserted. + * @param {Number} val - The value to be inserted into the vector. + */ +lunr.Vector.prototype.insert = function (insertIdx, val) { + this.upsert(insertIdx, val, function () { + throw "duplicate index" + }) +} + +/** + * Inserts or updates an existing index within the vector. + * + * @param {Number} insertIdx - The index at which the element should be inserted. + * @param {Number} val - The value to be inserted into the vector. + * @param {function} fn - A function that is called for updates, the existing value and the + * requested value are passed as arguments + */ +lunr.Vector.prototype.upsert = function (insertIdx, val, fn) { + this._magnitude = 0 + var position = this.positionForIndex(insertIdx) + + if (this.elements[position] == insertIdx) { + this.elements[position + 1] = fn(this.elements[position + 1], val) + } else { + this.elements.splice(position, 0, insertIdx, val) + } +} + +/** + * Calculates the magnitude of this vector. + * + * @returns {Number} + */ +lunr.Vector.prototype.magnitude = function () { + if (this._magnitude) return this._magnitude + + var sumOfSquares = 0, + elementsLength = this.elements.length + + for (var i = 1; i < elementsLength; i += 2) { + var val = this.elements[i] + sumOfSquares += val * val + } + + return this._magnitude = Math.sqrt(sumOfSquares) +} + +/** + * Calculates the dot product of this vector and another vector. + * + * @param {lunr.Vector} otherVector - The vector to compute the dot product with. 
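+ *
+ * The example below is an illustrative sketch only; it shows that only indexes
+ * present in both vectors contribute to the product.
+ *
+ * @example <caption>Dot product of two sparse vectors (illustrative)</caption>
+ * // new lunr.Vector([0, 1, 2, 1]).dot(new lunr.Vector([2, 3, 5, 4])) // => 3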
+ * @returns {Number} + */ +lunr.Vector.prototype.dot = function (otherVector) { + var dotProduct = 0, + a = this.elements, b = otherVector.elements, + aLen = a.length, bLen = b.length, + aVal = 0, bVal = 0, + i = 0, j = 0 + + while (i < aLen && j < bLen) { + aVal = a[i], bVal = b[j] + if (aVal < bVal) { + i += 2 + } else if (aVal > bVal) { + j += 2 + } else if (aVal == bVal) { + dotProduct += a[i + 1] * b[j + 1] + i += 2 + j += 2 + } + } + + return dotProduct +} + +/** + * Calculates the similarity between this vector and another vector. + * + * @param {lunr.Vector} otherVector - The other vector to calculate the + * similarity with. + * @returns {Number} + */ +lunr.Vector.prototype.similarity = function (otherVector) { + return this.dot(otherVector) / this.magnitude() || 0 +} + +/** + * Converts the vector to an array of the elements within the vector. + * + * @returns {Number[]} + */ +lunr.Vector.prototype.toArray = function () { + var output = new Array (this.elements.length / 2) + + for (var i = 1, j = 0; i < this.elements.length; i += 2, j++) { + output[j] = this.elements[i] + } + + return output +} + +/** + * A JSON serializable representation of the vector. + * + * @returns {Number[]} + */ +lunr.Vector.prototype.toJSON = function () { + return this.elements +} +/* eslint-disable */ +/*! + * lunr.stemmer + * Copyright (C) 2020 Oliver Nightingale + * Includes code from - http://tartarus.org/~martin/PorterStemmer/js.txt + */ + +/** + * lunr.stemmer is an english language stemmer, this is a JavaScript + * implementation of the PorterStemmer taken from http://tartarus.org/~martin + * + * @static + * @implements {lunr.PipelineFunction} + * @param {lunr.Token} token - The string to stem + * @returns {lunr.Token} + * @see {@link lunr.Pipeline} + * @function + */ +lunr.stemmer = (function(){ + var step2list = { + "ational" : "ate", + "tional" : "tion", + "enci" : "ence", + "anci" : "ance", + "izer" : "ize", + "bli" : "ble", + "alli" : "al", + "entli" : "ent", + "eli" : "e", + "ousli" : "ous", + "ization" : "ize", + "ation" : "ate", + "ator" : "ate", + "alism" : "al", + "iveness" : "ive", + "fulness" : "ful", + "ousness" : "ous", + "aliti" : "al", + "iviti" : "ive", + "biliti" : "ble", + "logi" : "log" + }, + + step3list = { + "icate" : "ic", + "ative" : "", + "alize" : "al", + "iciti" : "ic", + "ical" : "ic", + "ful" : "", + "ness" : "" + }, + + c = "[^aeiou]", // consonant + v = "[aeiouy]", // vowel + C = c + "[^aeiouy]*", // consonant sequence + V = v + "[aeiou]*", // vowel sequence + + mgr0 = "^(" + C + ")?" + V + C, // [C]VC... is m>0 + meq1 = "^(" + C + ")?" + V + C + "(" + V + ")?$", // [C]VC[V] is m=1 + mgr1 = "^(" + C + ")?" + V + C + V + C, // [C]VCVC... is m>1 + s_v = "^(" + C + ")?" 
+ v; // vowel in stem + + var re_mgr0 = new RegExp(mgr0); + var re_mgr1 = new RegExp(mgr1); + var re_meq1 = new RegExp(meq1); + var re_s_v = new RegExp(s_v); + + var re_1a = /^(.+?)(ss|i)es$/; + var re2_1a = /^(.+?)([^s])s$/; + var re_1b = /^(.+?)eed$/; + var re2_1b = /^(.+?)(ed|ing)$/; + var re_1b_2 = /.$/; + var re2_1b_2 = /(at|bl|iz)$/; + var re3_1b_2 = new RegExp("([^aeiouylsz])\\1$"); + var re4_1b_2 = new RegExp("^" + C + v + "[^aeiouwxy]$"); + + var re_1c = /^(.+?[^aeiou])y$/; + var re_2 = /^(.+?)(ational|tional|enci|anci|izer|bli|alli|entli|eli|ousli|ization|ation|ator|alism|iveness|fulness|ousness|aliti|iviti|biliti|logi)$/; + + var re_3 = /^(.+?)(icate|ative|alize|iciti|ical|ful|ness)$/; + + var re_4 = /^(.+?)(al|ance|ence|er|ic|able|ible|ant|ement|ment|ent|ou|ism|ate|iti|ous|ive|ize)$/; + var re2_4 = /^(.+?)(s|t)(ion)$/; + + var re_5 = /^(.+?)e$/; + var re_5_1 = /ll$/; + var re3_5 = new RegExp("^" + C + v + "[^aeiouwxy]$"); + + var porterStemmer = function porterStemmer(w) { + var stem, + suffix, + firstch, + re, + re2, + re3, + re4; + + if (w.length < 3) { return w; } + + firstch = w.substr(0,1); + if (firstch == "y") { + w = firstch.toUpperCase() + w.substr(1); + } + + // Step 1a + re = re_1a + re2 = re2_1a; + + if (re.test(w)) { w = w.replace(re,"$1$2"); } + else if (re2.test(w)) { w = w.replace(re2,"$1$2"); } + + // Step 1b + re = re_1b; + re2 = re2_1b; + if (re.test(w)) { + var fp = re.exec(w); + re = re_mgr0; + if (re.test(fp[1])) { + re = re_1b_2; + w = w.replace(re,""); + } + } else if (re2.test(w)) { + var fp = re2.exec(w); + stem = fp[1]; + re2 = re_s_v; + if (re2.test(stem)) { + w = stem; + re2 = re2_1b_2; + re3 = re3_1b_2; + re4 = re4_1b_2; + if (re2.test(w)) { w = w + "e"; } + else if (re3.test(w)) { re = re_1b_2; w = w.replace(re,""); } + else if (re4.test(w)) { w = w + "e"; } + } + } + + // Step 1c - replace suffix y or Y by i if preceded by a non-vowel which is not the first letter of the word (so cry -> cri, by -> by, say -> say) + re = re_1c; + if (re.test(w)) { + var fp = re.exec(w); + stem = fp[1]; + w = stem + "i"; + } + + // Step 2 + re = re_2; + if (re.test(w)) { + var fp = re.exec(w); + stem = fp[1]; + suffix = fp[2]; + re = re_mgr0; + if (re.test(stem)) { + w = stem + step2list[suffix]; + } + } + + // Step 3 + re = re_3; + if (re.test(w)) { + var fp = re.exec(w); + stem = fp[1]; + suffix = fp[2]; + re = re_mgr0; + if (re.test(stem)) { + w = stem + step3list[suffix]; + } + } + + // Step 4 + re = re_4; + re2 = re2_4; + if (re.test(w)) { + var fp = re.exec(w); + stem = fp[1]; + re = re_mgr1; + if (re.test(stem)) { + w = stem; + } + } else if (re2.test(w)) { + var fp = re2.exec(w); + stem = fp[1] + fp[2]; + re2 = re_mgr1; + if (re2.test(stem)) { + w = stem; + } + } + + // Step 5 + re = re_5; + if (re.test(w)) { + var fp = re.exec(w); + stem = fp[1]; + re = re_mgr1; + re2 = re_meq1; + re3 = re3_5; + if (re.test(stem) || (re2.test(stem) && !(re3.test(stem)))) { + w = stem; + } + } + + re = re_5_1; + re2 = re_mgr1; + if (re.test(w) && re2.test(w)) { + re = re_1b_2; + w = w.replace(re,""); + } + + // and turn initial Y back to y + + if (firstch == "y") { + w = firstch.toLowerCase() + w.substr(1); + } + + return w; + }; + + return function (token) { + return token.update(porterStemmer); + } +})(); + +lunr.Pipeline.registerFunction(lunr.stemmer, 'stemmer') +/*! + * lunr.stopWordFilter + * Copyright (C) 2020 Oliver Nightingale + */ + +/** + * lunr.generateStopWordFilter builds a stopWordFilter function from the provided + * list of stop words. 
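+ *
+ * For example (illustrative only), `lunr.generateStopWordFilter(['der', 'die', 'das'])`
+ * returns a pipeline function that drops those three words and passes every other
+ * token through unchanged.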
+ * + * The built in lunr.stopWordFilter is built using this generator and can be used + * to generate custom stopWordFilters for applications or non English languages. + * + * @function + * @param {Array} token The token to pass through the filter + * @returns {lunr.PipelineFunction} + * @see lunr.Pipeline + * @see lunr.stopWordFilter + */ +lunr.generateStopWordFilter = function (stopWords) { + var words = stopWords.reduce(function (memo, stopWord) { + memo[stopWord] = stopWord + return memo + }, {}) + + return function (token) { + if (token && words[token.toString()] !== token.toString()) return token + } +} + +/** + * lunr.stopWordFilter is an English language stop word list filter, any words + * contained in the list will not be passed through the filter. + * + * This is intended to be used in the Pipeline. If the token does not pass the + * filter then undefined will be returned. + * + * @function + * @implements {lunr.PipelineFunction} + * @params {lunr.Token} token - A token to check for being a stop word. + * @returns {lunr.Token} + * @see {@link lunr.Pipeline} + */ +lunr.stopWordFilter = lunr.generateStopWordFilter([ + 'a', + 'able', + 'about', + 'across', + 'after', + 'all', + 'almost', + 'also', + 'am', + 'among', + 'an', + 'and', + 'any', + 'are', + 'as', + 'at', + 'be', + 'because', + 'been', + 'but', + 'by', + 'can', + 'cannot', + 'could', + 'dear', + 'did', + 'do', + 'does', + 'either', + 'else', + 'ever', + 'every', + 'for', + 'from', + 'get', + 'got', + 'had', + 'has', + 'have', + 'he', + 'her', + 'hers', + 'him', + 'his', + 'how', + 'however', + 'i', + 'if', + 'in', + 'into', + 'is', + 'it', + 'its', + 'just', + 'least', + 'let', + 'like', + 'likely', + 'may', + 'me', + 'might', + 'most', + 'must', + 'my', + 'neither', + 'no', + 'nor', + 'not', + 'of', + 'off', + 'often', + 'on', + 'only', + 'or', + 'other', + 'our', + 'own', + 'rather', + 'said', + 'say', + 'says', + 'she', + 'should', + 'since', + 'so', + 'some', + 'than', + 'that', + 'the', + 'their', + 'them', + 'then', + 'there', + 'these', + 'they', + 'this', + 'tis', + 'to', + 'too', + 'twas', + 'us', + 'wants', + 'was', + 'we', + 'were', + 'what', + 'when', + 'where', + 'which', + 'while', + 'who', + 'whom', + 'why', + 'will', + 'with', + 'would', + 'yet', + 'you', + 'your' +]) + +lunr.Pipeline.registerFunction(lunr.stopWordFilter, 'stopWordFilter') +/*! + * lunr.trimmer + * Copyright (C) 2020 Oliver Nightingale + */ + +/** + * lunr.trimmer is a pipeline function for trimming non word + * characters from the beginning and end of tokens before they + * enter the index. + * + * This implementation may not work correctly for non latin + * characters and should either be removed or adapted for use + * with languages with non-latin characters. + * + * @static + * @implements {lunr.PipelineFunction} + * @param {lunr.Token} token The token to pass through the filter + * @returns {lunr.Token} + * @see lunr.Pipeline + */ +lunr.trimmer = function (token) { + return token.update(function (s) { + return s.replace(/^\W+/, '').replace(/\W+$/, '') + }) +} + +lunr.Pipeline.registerFunction(lunr.trimmer, 'trimmer') +/*! + * lunr.TokenSet + * Copyright (C) 2020 Oliver Nightingale + */ + +/** + * A token set is used to store the unique list of all tokens + * within an index. Token sets are also used to represent an + * incoming query to the index, this query token set and index + * token set are then intersected to find which tokens to look + * up in the inverted index. 
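+ *
+ * For example (illustrative only), intersecting the query token set built from
+ * "he*" with an index token set containing "hello" and "world" yields just "hello".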
+ * + * A token set can hold multiple tokens, as in the case of the + * index token set, or it can hold a single token as in the + * case of a simple query token set. + * + * Additionally token sets are used to perform wildcard matching. + * Leading, contained and trailing wildcards are supported, and + * from this edit distance matching can also be provided. + * + * Token sets are implemented as a minimal finite state automata, + * where both common prefixes and suffixes are shared between tokens. + * This helps to reduce the space used for storing the token set. + * + * @constructor + */ +lunr.TokenSet = function () { + this.final = false + this.edges = {} + this.id = lunr.TokenSet._nextId + lunr.TokenSet._nextId += 1 +} + +/** + * Keeps track of the next, auto increment, identifier to assign + * to a new tokenSet. + * + * TokenSets require a unique identifier to be correctly minimised. + * + * @private + */ +lunr.TokenSet._nextId = 1 + +/** + * Creates a TokenSet instance from the given sorted array of words. + * + * @param {String[]} arr - A sorted array of strings to create the set from. + * @returns {lunr.TokenSet} + * @throws Will throw an error if the input array is not sorted. + */ +lunr.TokenSet.fromArray = function (arr) { + var builder = new lunr.TokenSet.Builder + + for (var i = 0, len = arr.length; i < len; i++) { + builder.insert(arr[i]) + } + + builder.finish() + return builder.root +} + +/** + * Creates a token set from a query clause. + * + * @private + * @param {Object} clause - A single clause from lunr.Query. + * @param {string} clause.term - The query clause term. + * @param {number} [clause.editDistance] - The optional edit distance for the term. + * @returns {lunr.TokenSet} + */ +lunr.TokenSet.fromClause = function (clause) { + if ('editDistance' in clause) { + return lunr.TokenSet.fromFuzzyString(clause.term, clause.editDistance) + } else { + return lunr.TokenSet.fromString(clause.term) + } +} + +/** + * Creates a token set representing a single string with a specified + * edit distance. + * + * Insertions, deletions, substitutions and transpositions are each + * treated as an edit distance of 1. + * + * Increasing the allowed edit distance will have a dramatic impact + * on the performance of both creating and intersecting these TokenSets. + * It is advised to keep the edit distance less than 3. + * + * @param {string} str - The string to create the token set from. + * @param {number} editDistance - The allowed edit distance to match. 
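+ *
+ * The example below is an illustrative sketch of what a single edit allows.
+ *
+ * @example <caption>Fuzzy matching with one edit (illustrative)</caption>
+ * // lunr.TokenSet.fromFuzzyString('cat', 1) matches 'cat' itself as well as
+ * // 'cart' (insertion), 'at' (deletion), 'cap' (substitution) and 'act' (transposition)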
+ * @returns {lunr.Vector} + */ +lunr.TokenSet.fromFuzzyString = function (str, editDistance) { + var root = new lunr.TokenSet + + var stack = [{ + node: root, + editsRemaining: editDistance, + str: str + }] + + while (stack.length) { + var frame = stack.pop() + + // no edit + if (frame.str.length > 0) { + var char = frame.str.charAt(0), + noEditNode + + if (char in frame.node.edges) { + noEditNode = frame.node.edges[char] + } else { + noEditNode = new lunr.TokenSet + frame.node.edges[char] = noEditNode + } + + if (frame.str.length == 1) { + noEditNode.final = true + } + + stack.push({ + node: noEditNode, + editsRemaining: frame.editsRemaining, + str: frame.str.slice(1) + }) + } + + if (frame.editsRemaining == 0) { + continue + } + + // insertion + if ("*" in frame.node.edges) { + var insertionNode = frame.node.edges["*"] + } else { + var insertionNode = new lunr.TokenSet + frame.node.edges["*"] = insertionNode + } + + if (frame.str.length == 0) { + insertionNode.final = true + } + + stack.push({ + node: insertionNode, + editsRemaining: frame.editsRemaining - 1, + str: frame.str + }) + + // deletion + // can only do a deletion if we have enough edits remaining + // and if there are characters left to delete in the string + if (frame.str.length > 1) { + stack.push({ + node: frame.node, + editsRemaining: frame.editsRemaining - 1, + str: frame.str.slice(1) + }) + } + + // deletion + // just removing the last character from the str + if (frame.str.length == 1) { + frame.node.final = true + } + + // substitution + // can only do a substitution if we have enough edits remaining + // and if there are characters left to substitute + if (frame.str.length >= 1) { + if ("*" in frame.node.edges) { + var substitutionNode = frame.node.edges["*"] + } else { + var substitutionNode = new lunr.TokenSet + frame.node.edges["*"] = substitutionNode + } + + if (frame.str.length == 1) { + substitutionNode.final = true + } + + stack.push({ + node: substitutionNode, + editsRemaining: frame.editsRemaining - 1, + str: frame.str.slice(1) + }) + } + + // transposition + // can only do a transposition if there are edits remaining + // and there are enough characters to transpose + if (frame.str.length > 1) { + var charA = frame.str.charAt(0), + charB = frame.str.charAt(1), + transposeNode + + if (charB in frame.node.edges) { + transposeNode = frame.node.edges[charB] + } else { + transposeNode = new lunr.TokenSet + frame.node.edges[charB] = transposeNode + } + + if (frame.str.length == 1) { + transposeNode.final = true + } + + stack.push({ + node: transposeNode, + editsRemaining: frame.editsRemaining - 1, + str: charA + frame.str.slice(2) + }) + } + } + + return root +} + +/** + * Creates a TokenSet from a string. + * + * The string may contain one or more wildcard characters (*) + * that will allow wildcard matching when intersecting with + * another TokenSet. + * + * @param {string} str - The string to create a TokenSet from. + * @returns {lunr.TokenSet} + */ +lunr.TokenSet.fromString = function (str) { + var node = new lunr.TokenSet, + root = node + + /* + * Iterates through all characters within the passed string + * appending a node for each character. + * + * When a wildcard character is found then a self + * referencing edge is introduced to continually match + * any number of any characters. 
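+ *
+ * For example (illustrative only), "f*o" produces the chain f -> * -> o, where
+ * the "*" node points back to itself and so will match "fo", "foo", "folio", etc.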
+ */ + for (var i = 0, len = str.length; i < len; i++) { + var char = str[i], + final = (i == len - 1) + + if (char == "*") { + node.edges[char] = node + node.final = final + + } else { + var next = new lunr.TokenSet + next.final = final + + node.edges[char] = next + node = next + } + } + + return root +} + +/** + * Converts this TokenSet into an array of strings + * contained within the TokenSet. + * + * This is not intended to be used on a TokenSet that + * contains wildcards, in these cases the results are + * undefined and are likely to cause an infinite loop. + * + * @returns {string[]} + */ +lunr.TokenSet.prototype.toArray = function () { + var words = [] + + var stack = [{ + prefix: "", + node: this + }] + + while (stack.length) { + var frame = stack.pop(), + edges = Object.keys(frame.node.edges), + len = edges.length + + if (frame.node.final) { + /* In Safari, at this point the prefix is sometimes corrupted, see: + * https://github.com/olivernn/lunr.js/issues/279 Calling any + * String.prototype method forces Safari to "cast" this string to what + * it's supposed to be, fixing the bug. */ + frame.prefix.charAt(0) + words.push(frame.prefix) + } + + for (var i = 0; i < len; i++) { + var edge = edges[i] + + stack.push({ + prefix: frame.prefix.concat(edge), + node: frame.node.edges[edge] + }) + } + } + + return words +} + +/** + * Generates a string representation of a TokenSet. + * + * This is intended to allow TokenSets to be used as keys + * in objects, largely to aid the construction and minimisation + * of a TokenSet. As such it is not designed to be a human + * friendly representation of the TokenSet. + * + * @returns {string} + */ +lunr.TokenSet.prototype.toString = function () { + // NOTE: Using Object.keys here as this.edges is very likely + // to enter 'hash-mode' with many keys being added + // + // avoiding a for-in loop here as it leads to the function + // being de-optimised (at least in V8). From some simple + // benchmarks the performance is comparable, but allowing + // V8 to optimize may mean easy performance wins in the future. + + if (this._str) { + return this._str + } + + var str = this.final ? '1' : '0', + labels = Object.keys(this.edges).sort(), + len = labels.length + + for (var i = 0; i < len; i++) { + var label = labels[i], + node = this.edges[label] + + str = str + label + node.id + } + + return str +} + +/** + * Returns a new TokenSet that is the intersection of + * this TokenSet and the passed TokenSet. + * + * This intersection will take into account any wildcards + * contained within the TokenSet. + * + * @param {lunr.TokenSet} b - An other TokenSet to intersect with. 
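+ *
+ * The example below is an illustrative sketch; the word list passed to fromArray
+ * must already be sorted, and the order of the returned array is not guaranteed.
+ *
+ * @example <caption>Wildcard intersection (illustrative)</caption>
+ * // lunr.TokenSet.fromString('he*')
+ * //   .intersect(lunr.TokenSet.fromArray(['hell', 'help', 'world']))
+ * //   .toArray() // => ['hell', 'help']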
+ * @returns {lunr.TokenSet} + */ +lunr.TokenSet.prototype.intersect = function (b) { + var output = new lunr.TokenSet, + frame = undefined + + var stack = [{ + qNode: b, + output: output, + node: this + }] + + while (stack.length) { + frame = stack.pop() + + // NOTE: As with the #toString method, we are using + // Object.keys and a for loop instead of a for-in loop + // as both of these objects enter 'hash' mode, causing + // the function to be de-optimised in V8 + var qEdges = Object.keys(frame.qNode.edges), + qLen = qEdges.length, + nEdges = Object.keys(frame.node.edges), + nLen = nEdges.length + + for (var q = 0; q < qLen; q++) { + var qEdge = qEdges[q] + + for (var n = 0; n < nLen; n++) { + var nEdge = nEdges[n] + + if (nEdge == qEdge || qEdge == '*') { + var node = frame.node.edges[nEdge], + qNode = frame.qNode.edges[qEdge], + final = node.final && qNode.final, + next = undefined + + if (nEdge in frame.output.edges) { + // an edge already exists for this character + // no need to create a new node, just set the finality + // bit unless this node is already final + next = frame.output.edges[nEdge] + next.final = next.final || final + + } else { + // no edge exists yet, must create one + // set the finality bit and insert it + // into the output + next = new lunr.TokenSet + next.final = final + frame.output.edges[nEdge] = next + } + + stack.push({ + qNode: qNode, + output: next, + node: node + }) + } + } + } + } + + return output +} +lunr.TokenSet.Builder = function () { + this.previousWord = "" + this.root = new lunr.TokenSet + this.uncheckedNodes = [] + this.minimizedNodes = {} +} + +lunr.TokenSet.Builder.prototype.insert = function (word) { + var node, + commonPrefix = 0 + + if (word < this.previousWord) { + throw new Error ("Out of order word insertion") + } + + for (var i = 0; i < word.length && i < this.previousWord.length; i++) { + if (word[i] != this.previousWord[i]) break + commonPrefix++ + } + + this.minimize(commonPrefix) + + if (this.uncheckedNodes.length == 0) { + node = this.root + } else { + node = this.uncheckedNodes[this.uncheckedNodes.length - 1].child + } + + for (var i = commonPrefix; i < word.length; i++) { + var nextNode = new lunr.TokenSet, + char = word[i] + + node.edges[char] = nextNode + + this.uncheckedNodes.push({ + parent: node, + char: char, + child: nextNode + }) + + node = nextNode + } + + node.final = true + this.previousWord = word +} + +lunr.TokenSet.Builder.prototype.finish = function () { + this.minimize(0) +} + +lunr.TokenSet.Builder.prototype.minimize = function (downTo) { + for (var i = this.uncheckedNodes.length - 1; i >= downTo; i--) { + var node = this.uncheckedNodes[i], + childKey = node.child.toString() + + if (childKey in this.minimizedNodes) { + node.parent.edges[node.char] = this.minimizedNodes[childKey] + } else { + // Cache the key for this node since + // we know it can't change anymore + node.child._str = childKey + + this.minimizedNodes[childKey] = node.child + } + + this.uncheckedNodes.pop() + } +} +/*! + * lunr.Index + * Copyright (C) 2020 Oliver Nightingale + */ + +/** + * An index contains the built index of all documents and provides a query interface + * to the index. + * + * Usually instances of lunr.Index will not be created using this constructor, instead + * lunr.Builder should be used to construct new indexes, or lunr.Index.load should be + * used to load previously built and serialized indexes. + * + * @constructor + * @param {Object} attrs - The attributes of the built search index. 
+ * @param {Object} attrs.invertedIndex - An index of term/field to document reference. + * @param {Object} attrs.fieldVectors - Field vectors + * @param {lunr.TokenSet} attrs.tokenSet - An set of all corpus tokens. + * @param {string[]} attrs.fields - The names of indexed document fields. + * @param {lunr.Pipeline} attrs.pipeline - The pipeline to use for search terms. + */ +lunr.Index = function (attrs) { + this.invertedIndex = attrs.invertedIndex + this.fieldVectors = attrs.fieldVectors + this.tokenSet = attrs.tokenSet + this.fields = attrs.fields + this.pipeline = attrs.pipeline +} + +/** + * A result contains details of a document matching a search query. + * @typedef {Object} lunr.Index~Result + * @property {string} ref - The reference of the document this result represents. + * @property {number} score - A number between 0 and 1 representing how similar this document is to the query. + * @property {lunr.MatchData} matchData - Contains metadata about this match including which term(s) caused the match. + */ + +/** + * Although lunr provides the ability to create queries using lunr.Query, it also provides a simple + * query language which itself is parsed into an instance of lunr.Query. + * + * For programmatically building queries it is advised to directly use lunr.Query, the query language + * is best used for human entered text rather than program generated text. + * + * At its simplest queries can just be a single term, e.g. `hello`, multiple terms are also supported + * and will be combined with OR, e.g `hello world` will match documents that contain either 'hello' + * or 'world', though those that contain both will rank higher in the results. + * + * Wildcards can be included in terms to match one or more unspecified characters, these wildcards can + * be inserted anywhere within the term, and more than one wildcard can exist in a single term. Adding + * wildcards will increase the number of documents that will be found but can also have a negative + * impact on query performance, especially with wildcards at the beginning of a term. + * + * Terms can be restricted to specific fields, e.g. `title:hello`, only documents with the term + * hello in the title field will match this query. Using a field not present in the index will lead + * to an error being thrown. + * + * Modifiers can also be added to terms, lunr supports edit distance and boost modifiers on terms. A term + * boost will make documents matching that term score higher, e.g. `foo^5`. Edit distance is also supported + * to provide fuzzy matching, e.g. 'hello~2' will match documents with hello with an edit distance of 2. + * Avoid large values for edit distance to improve query performance. + * + * Each term also supports a presence modifier. By default a term's presence in document is optional, however + * this can be changed to either required or prohibited. For a term's presence to be required in a document the + * term should be prefixed with a '+', e.g. `+foo bar` is a search for documents that must contain 'foo' and + * optionally contain 'bar'. Conversely a leading '-' sets the terms presence to prohibited, i.e. it must not + * appear in a document, e.g. `-foo bar` is a search for documents that do not contain 'foo' but may contain 'bar'. + * + * To escape special characters the backslash character '\' can be used, this allows searches to include + * characters that would normally be considered modifiers, e.g. 
`foo\~2` will search for a term "foo~2" instead + * of attempting to apply a boost of 2 to the search term "foo". + * + * @typedef {string} lunr.Index~QueryString + * @example Simple single term query + * hello + * @example Multiple term query + * hello world + * @example term scoped to a field + * title:hello + * @example term with a boost of 10 + * hello^10 + * @example term with an edit distance of 2 + * hello~2 + * @example terms with presence modifiers + * -foo +bar baz + */ + +/** + * Performs a search against the index using lunr query syntax. + * + * Results will be returned sorted by their score, the most relevant results + * will be returned first. For details on how the score is calculated, please see + * the {@link https://lunrjs.com/guides/searching.html#scoring|guide}. + * + * For more programmatic querying use lunr.Index#query. + * + * @param {lunr.Index~QueryString} queryString - A string containing a lunr query. + * @throws {lunr.QueryParseError} If the passed query string cannot be parsed. + * @returns {lunr.Index~Result[]} + */ +lunr.Index.prototype.search = function (queryString) { + return this.query(function (query) { + var parser = new lunr.QueryParser(queryString, query) + parser.parse() + }) +} + +/** + * A query builder callback provides a query object to be used to express + * the query to perform on the index. + * + * @callback lunr.Index~queryBuilder + * @param {lunr.Query} query - The query object to build up. + * @this lunr.Query + */ + +/** + * Performs a query against the index using the yielded lunr.Query object. + * + * If performing programmatic queries against the index, this method is preferred + * over lunr.Index#search so as to avoid the additional query parsing overhead. + * + * A query object is yielded to the supplied function which should be used to + * express the query to be run against the index. + * + * Note that although this function takes a callback parameter it is _not_ an + * asynchronous operation, the callback is just yielded a query object to be + * customized. + * + * @param {lunr.Index~queryBuilder} fn - A function that is used to build the query. + * @returns {lunr.Index~Result[]} + */ +lunr.Index.prototype.query = function (fn) { + // for each query clause + // * process terms + // * expand terms from token set + // * find matching documents and metadata + // * get document vectors + // * score documents + + var query = new lunr.Query(this.fields), + matchingFields = Object.create(null), + queryVectors = Object.create(null), + termFieldCache = Object.create(null), + requiredMatches = Object.create(null), + prohibitedMatches = Object.create(null) + + /* + * To support field level boosts a query vector is created per + * field. An empty vector is eagerly created to support negated + * queries. + */ + for (var i = 0; i < this.fields.length; i++) { + queryVectors[this.fields[i]] = new lunr.Vector + } + + fn.call(query, query) + + for (var i = 0; i < query.clauses.length; i++) { + /* + * Unless the pipeline has been disabled for this term, which is + * the case for terms with wildcards, we need to pass the clause + * term through the search pipeline. A pipeline returns an array + * of processed terms. Pipeline functions may expand the passed + * term, which means we may end up performing multiple index lookups + * for a single query term. 
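+ *
+ * For example (illustrative only), the clause term "running" would normally be
+ * reduced to "run" by the default search pipeline before it is looked up.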
+ */ + var clause = query.clauses[i], + terms = null, + clauseMatches = lunr.Set.empty + + if (clause.usePipeline) { + terms = this.pipeline.runString(clause.term, { + fields: clause.fields + }) + } else { + terms = [clause.term] + } + + for (var m = 0; m < terms.length; m++) { + var term = terms[m] + + /* + * Each term returned from the pipeline needs to use the same query + * clause object, e.g. the same boost and or edit distance. The + * simplest way to do this is to re-use the clause object but mutate + * its term property. + */ + clause.term = term + + /* + * From the term in the clause we create a token set which will then + * be used to intersect the indexes token set to get a list of terms + * to lookup in the inverted index + */ + var termTokenSet = lunr.TokenSet.fromClause(clause), + expandedTerms = this.tokenSet.intersect(termTokenSet).toArray() + + /* + * If a term marked as required does not exist in the tokenSet it is + * impossible for the search to return any matches. We set all the field + * scoped required matches set to empty and stop examining any further + * clauses. + */ + if (expandedTerms.length === 0 && clause.presence === lunr.Query.presence.REQUIRED) { + for (var k = 0; k < clause.fields.length; k++) { + var field = clause.fields[k] + requiredMatches[field] = lunr.Set.empty + } + + break + } + + for (var j = 0; j < expandedTerms.length; j++) { + /* + * For each term get the posting and termIndex, this is required for + * building the query vector. + */ + var expandedTerm = expandedTerms[j], + posting = this.invertedIndex[expandedTerm], + termIndex = posting._index + + for (var k = 0; k < clause.fields.length; k++) { + /* + * For each field that this query term is scoped by (by default + * all fields are in scope) we need to get all the document refs + * that have this term in that field. + * + * The posting is the entry in the invertedIndex for the matching + * term from above. + */ + var field = clause.fields[k], + fieldPosting = posting[field], + matchingDocumentRefs = Object.keys(fieldPosting), + termField = expandedTerm + "/" + field, + matchingDocumentsSet = new lunr.Set(matchingDocumentRefs) + + /* + * if the presence of this term is required ensure that the matching + * documents are added to the set of required matches for this clause. + * + */ + if (clause.presence == lunr.Query.presence.REQUIRED) { + clauseMatches = clauseMatches.union(matchingDocumentsSet) + + if (requiredMatches[field] === undefined) { + requiredMatches[field] = lunr.Set.complete + } + } + + /* + * if the presence of this term is prohibited ensure that the matching + * documents are added to the set of prohibited matches for this field, + * creating that set if it does not yet exist. + */ + if (clause.presence == lunr.Query.presence.PROHIBITED) { + if (prohibitedMatches[field] === undefined) { + prohibitedMatches[field] = lunr.Set.empty + } + + prohibitedMatches[field] = prohibitedMatches[field].union(matchingDocumentsSet) + + /* + * Prohibited matches should not be part of the query vector used for + * similarity scoring and no metadata should be extracted so we continue + * to the next field + */ + continue + } + + /* + * The query field vector is populated using the termIndex found for + * the term and a unit value with the appropriate boost applied. + * Using upsert because there could already be an entry in the vector + * for the term we are working with. In that case we just add the scores + * together. 
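+ *
+ * For example (illustrative figures), if two clauses of the query resolve to the
+ * same term for this field with boosts 2 and 3, the entry for that term index
+ * ends up as 5.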
+ */ + queryVectors[field].upsert(termIndex, clause.boost, function (a, b) { return a + b }) + + /** + * If we've already seen this term, field combo then we've already collected + * the matching documents and metadata, no need to go through all that again + */ + if (termFieldCache[termField]) { + continue + } + + for (var l = 0; l < matchingDocumentRefs.length; l++) { + /* + * All metadata for this term/field/document triple + * are then extracted and collected into an instance + * of lunr.MatchData ready to be returned in the query + * results + */ + var matchingDocumentRef = matchingDocumentRefs[l], + matchingFieldRef = new lunr.FieldRef (matchingDocumentRef, field), + metadata = fieldPosting[matchingDocumentRef], + fieldMatch + + if ((fieldMatch = matchingFields[matchingFieldRef]) === undefined) { + matchingFields[matchingFieldRef] = new lunr.MatchData (expandedTerm, field, metadata) + } else { + fieldMatch.add(expandedTerm, field, metadata) + } + + } + + termFieldCache[termField] = true + } + } + } + + /** + * If the presence was required we need to update the requiredMatches field sets. + * We do this after all fields for the term have collected their matches because + * the clause terms presence is required in _any_ of the fields not _all_ of the + * fields. + */ + if (clause.presence === lunr.Query.presence.REQUIRED) { + for (var k = 0; k < clause.fields.length; k++) { + var field = clause.fields[k] + requiredMatches[field] = requiredMatches[field].intersect(clauseMatches) + } + } + } + + /** + * Need to combine the field scoped required and prohibited + * matching documents into a global set of required and prohibited + * matches + */ + var allRequiredMatches = lunr.Set.complete, + allProhibitedMatches = lunr.Set.empty + + for (var i = 0; i < this.fields.length; i++) { + var field = this.fields[i] + + if (requiredMatches[field]) { + allRequiredMatches = allRequiredMatches.intersect(requiredMatches[field]) + } + + if (prohibitedMatches[field]) { + allProhibitedMatches = allProhibitedMatches.union(prohibitedMatches[field]) + } + } + + var matchingFieldRefs = Object.keys(matchingFields), + results = [], + matches = Object.create(null) + + /* + * If the query is negated (contains only prohibited terms) + * we need to get _all_ fieldRefs currently existing in the + * index. This is only done when we know that the query is + * entirely prohibited terms to avoid any cost of getting all + * fieldRefs unnecessarily. + * + * Additionally, blank MatchData must be created to correctly + * populate the results. + */ + if (query.isNegated()) { + matchingFieldRefs = Object.keys(this.fieldVectors) + + for (var i = 0; i < matchingFieldRefs.length; i++) { + var matchingFieldRef = matchingFieldRefs[i] + var fieldRef = lunr.FieldRef.fromString(matchingFieldRef) + matchingFields[matchingFieldRef] = new lunr.MatchData + } + } + + for (var i = 0; i < matchingFieldRefs.length; i++) { + /* + * Currently we have document fields that match the query, but we + * need to return documents. The matchData and scores are combined + * from multiple fields belonging to the same document. + * + * Scores are calculated by field, using the query vectors created + * above, and combined into a final document score using addition. 
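+ *
+ * For example (illustrative figures and field names), a document whose title field
+ * scores 0.8 and whose body field scores 0.3 for this query ends up with a combined
+ * document score of 1.1.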
+ */ + var fieldRef = lunr.FieldRef.fromString(matchingFieldRefs[i]), + docRef = fieldRef.docRef + + if (!allRequiredMatches.contains(docRef)) { + continue + } + + if (allProhibitedMatches.contains(docRef)) { + continue + } + + var fieldVector = this.fieldVectors[fieldRef], + score = queryVectors[fieldRef.fieldName].similarity(fieldVector), + docMatch + + if ((docMatch = matches[docRef]) !== undefined) { + docMatch.score += score + docMatch.matchData.combine(matchingFields[fieldRef]) + } else { + var match = { + ref: docRef, + score: score, + matchData: matchingFields[fieldRef] + } + matches[docRef] = match + results.push(match) + } + } + + /* + * Sort the results objects by score, highest first. + */ + return results.sort(function (a, b) { + return b.score - a.score + }) +} + +/** + * Prepares the index for JSON serialization. + * + * The schema for this JSON blob will be described in a + * separate JSON schema file. + * + * @returns {Object} + */ +lunr.Index.prototype.toJSON = function () { + var invertedIndex = Object.keys(this.invertedIndex) + .sort() + .map(function (term) { + return [term, this.invertedIndex[term]] + }, this) + + var fieldVectors = Object.keys(this.fieldVectors) + .map(function (ref) { + return [ref, this.fieldVectors[ref].toJSON()] + }, this) + + return { + version: lunr.version, + fields: this.fields, + fieldVectors: fieldVectors, + invertedIndex: invertedIndex, + pipeline: this.pipeline.toJSON() + } +} + +/** + * Loads a previously serialized lunr.Index + * + * @param {Object} serializedIndex - A previously serialized lunr.Index + * @returns {lunr.Index} + */ +lunr.Index.load = function (serializedIndex) { + var attrs = {}, + fieldVectors = {}, + serializedVectors = serializedIndex.fieldVectors, + invertedIndex = Object.create(null), + serializedInvertedIndex = serializedIndex.invertedIndex, + tokenSetBuilder = new lunr.TokenSet.Builder, + pipeline = lunr.Pipeline.load(serializedIndex.pipeline) + + if (serializedIndex.version != lunr.version) { + lunr.utils.warn("Version mismatch when loading serialised index. Current version of lunr '" + lunr.version + "' does not match serialized index '" + serializedIndex.version + "'") + } + + for (var i = 0; i < serializedVectors.length; i++) { + var tuple = serializedVectors[i], + ref = tuple[0], + elements = tuple[1] + + fieldVectors[ref] = new lunr.Vector(elements) + } + + for (var i = 0; i < serializedInvertedIndex.length; i++) { + var tuple = serializedInvertedIndex[i], + term = tuple[0], + posting = tuple[1] + + tokenSetBuilder.insert(term) + invertedIndex[term] = posting + } + + tokenSetBuilder.finish() + + attrs.fields = serializedIndex.fields + + attrs.fieldVectors = fieldVectors + attrs.invertedIndex = invertedIndex + attrs.tokenSet = tokenSetBuilder.root + attrs.pipeline = pipeline + + return new lunr.Index(attrs) +} +/*! + * lunr.Builder + * Copyright (C) 2020 Oliver Nightingale + */ + +/** + * lunr.Builder performs indexing on a set of documents and + * returns instances of lunr.Index ready for querying. + * + * All configuration of the index is done via the builder, the + * fields to index, the document reference, the text processing + * pipeline and document scoring parameters are all set on the + * builder before indexing. + * + * @constructor + * @property {string} _ref - Internal reference to the document reference field. + * @property {string[]} _fields - Internal reference to the document fields to index. + * @property {object} invertedIndex - The inverted index maps terms to document fields. 
+ * @property {object} documentTermFrequencies - Keeps track of document term frequencies. + * @property {object} documentLengths - Keeps track of the length of documents added to the index. + * @property {lunr.tokenizer} tokenizer - Function for splitting strings into tokens for indexing. + * @property {lunr.Pipeline} pipeline - The pipeline performs text processing on tokens before indexing. + * @property {lunr.Pipeline} searchPipeline - A pipeline for processing search terms before querying the index. + * @property {number} documentCount - Keeps track of the total number of documents indexed. + * @property {number} _b - A parameter to control field length normalization, setting this to 0 disabled normalization, 1 fully normalizes field lengths, the default value is 0.75. + * @property {number} _k1 - A parameter to control how quickly an increase in term frequency results in term frequency saturation, the default value is 1.2. + * @property {number} termIndex - A counter incremented for each unique term, used to identify a terms position in the vector space. + * @property {array} metadataWhitelist - A list of metadata keys that have been whitelisted for entry in the index. + */ +lunr.Builder = function () { + this._ref = "id" + this._fields = Object.create(null) + this._documents = Object.create(null) + this.invertedIndex = Object.create(null) + this.fieldTermFrequencies = {} + this.fieldLengths = {} + this.tokenizer = lunr.tokenizer + this.pipeline = new lunr.Pipeline + this.searchPipeline = new lunr.Pipeline + this.documentCount = 0 + this._b = 0.75 + this._k1 = 1.2 + this.termIndex = 0 + this.metadataWhitelist = [] +} + +/** + * Sets the document field used as the document reference. Every document must have this field. + * The type of this field in the document should be a string, if it is not a string it will be + * coerced into a string by calling toString. + * + * The default ref is 'id'. + * + * The ref should _not_ be changed during indexing, it should be set before any documents are + * added to the index. Changing it during indexing can lead to inconsistent results. + * + * @param {string} ref - The name of the reference field in the document. + */ +lunr.Builder.prototype.ref = function (ref) { + this._ref = ref +} + +/** + * A function that is used to extract a field from a document. + * + * Lunr expects a field to be at the top level of a document, if however the field + * is deeply nested within a document an extractor function can be used to extract + * the right field for indexing. + * + * @callback fieldExtractor + * @param {object} doc - The document being added to the index. + * @returns {?(string|object|object[])} obj - The object that will be indexed for this field. + * @example Extracting a nested field + * function (doc) { return doc.nested.field } + */ + +/** + * Adds a field to the list of document fields that will be indexed. Every document being + * indexed should have this field. Null values for this field in indexed documents will + * not cause errors but will limit the chance of that document being retrieved by searches. + * + * All fields should be added before adding documents to the index. Adding fields after + * a document has been indexed will have no effect on already indexed documents. + * + * Fields can be boosted at build time. This allows terms within that field to have more + * importance when ranking search results. Use a field boost to specify that matches within + * one field are more important than other fields. 
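+ *
+ * The example below is an illustrative sketch; `doc.meta.author` is a hypothetical
+ * document shape used only to show the extractor option.
+ *
+ * @example <caption>Boosting a field and extracting a nested field (illustrative)</caption>
+ * // builder.field('title', { boost: 10 })
+ * // builder.field('author', { extractor: function (doc) { return doc.meta.author } })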
+ * + * @param {string} fieldName - The name of a field to index in all documents. + * @param {object} attributes - Optional attributes associated with this field. + * @param {number} [attributes.boost=1] - Boost applied to all terms within this field. + * @param {fieldExtractor} [attributes.extractor] - Function to extract a field from a document. + * @throws {RangeError} fieldName cannot contain unsupported characters '/' + */ +lunr.Builder.prototype.field = function (fieldName, attributes) { + if (/\//.test(fieldName)) { + throw new RangeError ("Field '" + fieldName + "' contains illegal character '/'") + } + + this._fields[fieldName] = attributes || {} +} + +/** + * A parameter to tune the amount of field length normalisation that is applied when + * calculating relevance scores. A value of 0 will completely disable any normalisation + * and a value of 1 will fully normalise field lengths. The default is 0.75. Values of b + * will be clamped to the range 0 - 1. + * + * @param {number} number - The value to set for this tuning parameter. + */ +lunr.Builder.prototype.b = function (number) { + if (number < 0) { + this._b = 0 + } else if (number > 1) { + this._b = 1 + } else { + this._b = number + } +} + +/** + * A parameter that controls the speed at which a rise in term frequency results in term + * frequency saturation. The default value is 1.2. Setting this to a higher value will give + * slower saturation levels, a lower value will result in quicker saturation. + * + * @param {number} number - The value to set for this tuning parameter. + */ +lunr.Builder.prototype.k1 = function (number) { + this._k1 = number +} + +/** + * Adds a document to the index. + * + * Before adding fields to the index the index should have been fully setup, with the document + * ref and all fields to index already having been specified. + * + * The document must have a field name as specified by the ref (by default this is 'id') and + * it should have all fields defined for indexing, though null or undefined values will not + * cause errors. + * + * Entire documents can be boosted at build time. Applying a boost to a document indicates that + * this document should rank higher in search results than other documents. + * + * @param {object} doc - The document to add to the index. + * @param {object} attributes - Optional attributes associated with this document. + * @param {number} [attributes.boost=1] - Boost applied to all terms within this document. + */ +lunr.Builder.prototype.add = function (doc, attributes) { + var docRef = doc[this._ref], + fields = Object.keys(this._fields) + + this._documents[docRef] = attributes || {} + this.documentCount += 1 + + for (var i = 0; i < fields.length; i++) { + var fieldName = fields[i], + extractor = this._fields[fieldName].extractor, + field = extractor ? 
extractor(doc) : doc[fieldName], + tokens = this.tokenizer(field, { + fields: [fieldName] + }), + terms = this.pipeline.run(tokens), + fieldRef = new lunr.FieldRef (docRef, fieldName), + fieldTerms = Object.create(null) + + this.fieldTermFrequencies[fieldRef] = fieldTerms + this.fieldLengths[fieldRef] = 0 + + // store the length of this field for this document + this.fieldLengths[fieldRef] += terms.length + + // calculate term frequencies for this field + for (var j = 0; j < terms.length; j++) { + var term = terms[j] + + if (fieldTerms[term] == undefined) { + fieldTerms[term] = 0 + } + + fieldTerms[term] += 1 + + // add to inverted index + // create an initial posting if one doesn't exist + if (this.invertedIndex[term] == undefined) { + var posting = Object.create(null) + posting["_index"] = this.termIndex + this.termIndex += 1 + + for (var k = 0; k < fields.length; k++) { + posting[fields[k]] = Object.create(null) + } + + this.invertedIndex[term] = posting + } + + // add an entry for this term/fieldName/docRef to the invertedIndex + if (this.invertedIndex[term][fieldName][docRef] == undefined) { + this.invertedIndex[term][fieldName][docRef] = Object.create(null) + } + + // store all whitelisted metadata about this token in the + // inverted index + for (var l = 0; l < this.metadataWhitelist.length; l++) { + var metadataKey = this.metadataWhitelist[l], + metadata = term.metadata[metadataKey] + + if (this.invertedIndex[term][fieldName][docRef][metadataKey] == undefined) { + this.invertedIndex[term][fieldName][docRef][metadataKey] = [] + } + + this.invertedIndex[term][fieldName][docRef][metadataKey].push(metadata) + } + } + + } +} + +/** + * Calculates the average document length for this index + * + * @private + */ +lunr.Builder.prototype.calculateAverageFieldLengths = function () { + + var fieldRefs = Object.keys(this.fieldLengths), + numberOfFields = fieldRefs.length, + accumulator = {}, + documentsWithField = {} + + for (var i = 0; i < numberOfFields; i++) { + var fieldRef = lunr.FieldRef.fromString(fieldRefs[i]), + field = fieldRef.fieldName + + documentsWithField[field] || (documentsWithField[field] = 0) + documentsWithField[field] += 1 + + accumulator[field] || (accumulator[field] = 0) + accumulator[field] += this.fieldLengths[fieldRef] + } + + var fields = Object.keys(this._fields) + + for (var i = 0; i < fields.length; i++) { + var fieldName = fields[i] + accumulator[fieldName] = accumulator[fieldName] / documentsWithField[fieldName] + } + + this.averageFieldLength = accumulator +} + +/** + * Builds a vector space model of every document using lunr.Vector + * + * @private + */ +lunr.Builder.prototype.createFieldVectors = function () { + var fieldVectors = {}, + fieldRefs = Object.keys(this.fieldTermFrequencies), + fieldRefsLength = fieldRefs.length, + termIdfCache = Object.create(null) + + for (var i = 0; i < fieldRefsLength; i++) { + var fieldRef = lunr.FieldRef.fromString(fieldRefs[i]), + fieldName = fieldRef.fieldName, + fieldLength = this.fieldLengths[fieldRef], + fieldVector = new lunr.Vector, + termFrequencies = this.fieldTermFrequencies[fieldRef], + terms = Object.keys(termFrequencies), + termsLength = terms.length + + + var fieldBoost = this._fields[fieldName].boost || 1, + docBoost = this._documents[fieldRef.docRef].boost || 1 + + for (var j = 0; j < termsLength; j++) { + var term = terms[j], + tf = termFrequencies[term], + termIndex = this.invertedIndex[term]._index, + idf, score, scoreWithPrecision + + if (termIdfCache[term] === undefined) { + idf = 
lunr.idf(this.invertedIndex[term], this.documentCount) + termIdfCache[term] = idf + } else { + idf = termIdfCache[term] + } + + score = idf * ((this._k1 + 1) * tf) / (this._k1 * (1 - this._b + this._b * (fieldLength / this.averageFieldLength[fieldName])) + tf) + score *= fieldBoost + score *= docBoost + scoreWithPrecision = Math.round(score * 1000) / 1000 + // Converts 1.23456789 to 1.234. + // Reducing the precision so that the vectors take up less + // space when serialised. Doing it now so that they behave + // the same before and after serialisation. Also, this is + // the fastest approach to reducing a number's precision in + // JavaScript. + + fieldVector.insert(termIndex, scoreWithPrecision) + } + + fieldVectors[fieldRef] = fieldVector + } + + this.fieldVectors = fieldVectors +} + +/** + * Creates a token set of all tokens in the index using lunr.TokenSet + * + * @private + */ +lunr.Builder.prototype.createTokenSet = function () { + this.tokenSet = lunr.TokenSet.fromArray( + Object.keys(this.invertedIndex).sort() + ) +} + +/** + * Builds the index, creating an instance of lunr.Index. + * + * This completes the indexing process and should only be called + * once all documents have been added to the index. + * + * @returns {lunr.Index} + */ +lunr.Builder.prototype.build = function () { + this.calculateAverageFieldLengths() + this.createFieldVectors() + this.createTokenSet() + + return new lunr.Index({ + invertedIndex: this.invertedIndex, + fieldVectors: this.fieldVectors, + tokenSet: this.tokenSet, + fields: Object.keys(this._fields), + pipeline: this.searchPipeline + }) +} + +/** + * Applies a plugin to the index builder. + * + * A plugin is a function that is called with the index builder as its context. + * Plugins can be used to customise or extend the behaviour of the index + * in some way. A plugin is just a function, that encapsulated the custom + * behaviour that should be applied when building the index. + * + * The plugin function will be called with the index builder as its argument, additional + * arguments can also be passed when calling use. The function will be called + * with the index builder as its context. + * + * @param {Function} plugin The plugin to apply. + */ +lunr.Builder.prototype.use = function (fn) { + var args = Array.prototype.slice.call(arguments, 1) + args.unshift(this) + fn.apply(this, args) +} +/** + * Contains and collects metadata about a matching document. + * A single instance of lunr.MatchData is returned as part of every + * lunr.Index~Result. + * + * @constructor + * @param {string} term - The term this match data is associated with + * @param {string} field - The field in which the term was found + * @param {object} metadata - The metadata recorded about this term in this field + * @property {object} metadata - A cloned collection of metadata associated with this document. + * @see {@link lunr.Index~Result} + */ +lunr.MatchData = function (term, field, metadata) { + var clonedMetadata = Object.create(null), + metadataKeys = Object.keys(metadata || {}) + + // Cloning the metadata to prevent the original + // being mutated during match data combination. 
+ // Metadata is kept in an array within the inverted + // index so cloning the data can be done with + // Array#slice + for (var i = 0; i < metadataKeys.length; i++) { + var key = metadataKeys[i] + clonedMetadata[key] = metadata[key].slice() + } + + this.metadata = Object.create(null) + + if (term !== undefined) { + this.metadata[term] = Object.create(null) + this.metadata[term][field] = clonedMetadata + } +} + +/** + * An instance of lunr.MatchData will be created for every term that matches a + * document. However only one instance is required in a lunr.Index~Result. This + * method combines metadata from another instance of lunr.MatchData with this + * objects metadata. + * + * @param {lunr.MatchData} otherMatchData - Another instance of match data to merge with this one. + * @see {@link lunr.Index~Result} + */ +lunr.MatchData.prototype.combine = function (otherMatchData) { + var terms = Object.keys(otherMatchData.metadata) + + for (var i = 0; i < terms.length; i++) { + var term = terms[i], + fields = Object.keys(otherMatchData.metadata[term]) + + if (this.metadata[term] == undefined) { + this.metadata[term] = Object.create(null) + } + + for (var j = 0; j < fields.length; j++) { + var field = fields[j], + keys = Object.keys(otherMatchData.metadata[term][field]) + + if (this.metadata[term][field] == undefined) { + this.metadata[term][field] = Object.create(null) + } + + for (var k = 0; k < keys.length; k++) { + var key = keys[k] + + if (this.metadata[term][field][key] == undefined) { + this.metadata[term][field][key] = otherMatchData.metadata[term][field][key] + } else { + this.metadata[term][field][key] = this.metadata[term][field][key].concat(otherMatchData.metadata[term][field][key]) + } + + } + } + } +} + +/** + * Add metadata for a term/field pair to this instance of match data. + * + * @param {string} term - The term this match data is associated with + * @param {string} field - The field in which the term was found + * @param {object} metadata - The metadata recorded about this term in this field + */ +lunr.MatchData.prototype.add = function (term, field, metadata) { + if (!(term in this.metadata)) { + this.metadata[term] = Object.create(null) + this.metadata[term][field] = metadata + return + } + + if (!(field in this.metadata[term])) { + this.metadata[term][field] = metadata + return + } + + var metadataKeys = Object.keys(metadata) + + for (var i = 0; i < metadataKeys.length; i++) { + var key = metadataKeys[i] + + if (key in this.metadata[term][field]) { + this.metadata[term][field][key] = this.metadata[term][field][key].concat(metadata[key]) + } else { + this.metadata[term][field][key] = metadata[key] + } + } +} +/** + * A lunr.Query provides a programmatic way of defining queries to be performed + * against a {@link lunr.Index}. + * + * Prefer constructing a lunr.Query using the {@link lunr.Index#query} method + * so the query object is pre-initialized with the right index fields. + * + * @constructor + * @property {lunr.Query~Clause[]} clauses - An array of query clauses. + * @property {string[]} allFields - An array of all available fields in a lunr.Index. + */ +lunr.Query = function (allFields) { + this.clauses = [] + this.allFields = allFields +} + +/** + * Constants for indicating what kind of automatic wildcard insertion will be used when constructing a query clause. + * + * This allows wildcards to be added to the beginning and end of a term without having to manually do any string + * concatenation. 
+ * + * The wildcard constants can be bitwise combined to select both leading and trailing wildcards. + * + * @constant + * @default + * @property {number} wildcard.NONE - The term will have no wildcards inserted, this is the default behaviour + * @property {number} wildcard.LEADING - Prepend the term with a wildcard, unless a leading wildcard already exists + * @property {number} wildcard.TRAILING - Append a wildcard to the term, unless a trailing wildcard already exists + * @see lunr.Query~Clause + * @see lunr.Query#clause + * @see lunr.Query#term + * @example query term with trailing wildcard + * query.term('foo', { wildcard: lunr.Query.wildcard.TRAILING }) + * @example query term with leading and trailing wildcard + * query.term('foo', { + * wildcard: lunr.Query.wildcard.LEADING | lunr.Query.wildcard.TRAILING + * }) + */ + +lunr.Query.wildcard = new String ("*") +lunr.Query.wildcard.NONE = 0 +lunr.Query.wildcard.LEADING = 1 +lunr.Query.wildcard.TRAILING = 2 + +/** + * Constants for indicating what kind of presence a term must have in matching documents. + * + * @constant + * @enum {number} + * @see lunr.Query~Clause + * @see lunr.Query#clause + * @see lunr.Query#term + * @example query term with required presence + * query.term('foo', { presence: lunr.Query.presence.REQUIRED }) + */ +lunr.Query.presence = { + /** + * Term's presence in a document is optional, this is the default value. + */ + OPTIONAL: 1, + + /** + * Term's presence in a document is required, documents that do not contain + * this term will not be returned. + */ + REQUIRED: 2, + + /** + * Term's presence in a document is prohibited, documents that do contain + * this term will not be returned. + */ + PROHIBITED: 3 +} + +/** + * A single clause in a {@link lunr.Query} contains a term and details on how to + * match that term against a {@link lunr.Index}. + * + * @typedef {Object} lunr.Query~Clause + * @property {string[]} fields - The fields in an index this clause should be matched against. + * @property {number} [boost=1] - Any boost that should be applied when matching this clause. + * @property {number} [editDistance] - Whether the term should have fuzzy matching applied, and how fuzzy the match should be. + * @property {boolean} [usePipeline] - Whether the term should be passed through the search pipeline. + * @property {number} [wildcard=lunr.Query.wildcard.NONE] - Whether the term should have wildcards appended or prepended. + * @property {number} [presence=lunr.Query.presence.OPTIONAL] - The terms presence in any matching documents. + */ + +/** + * Adds a {@link lunr.Query~Clause} to this query. + * + * Unless the clause contains the fields to be matched all fields will be matched. In addition + * a default boost of 1 is applied to the clause. + * + * @param {lunr.Query~Clause} clause - The clause to add to this query. 
+ * @see lunr.Query~Clause + * @returns {lunr.Query} + */ +lunr.Query.prototype.clause = function (clause) { + if (!('fields' in clause)) { + clause.fields = this.allFields + } + + if (!('boost' in clause)) { + clause.boost = 1 + } + + if (!('usePipeline' in clause)) { + clause.usePipeline = true + } + + if (!('wildcard' in clause)) { + clause.wildcard = lunr.Query.wildcard.NONE + } + + if ((clause.wildcard & lunr.Query.wildcard.LEADING) && (clause.term.charAt(0) != lunr.Query.wildcard)) { + clause.term = "*" + clause.term + } + + if ((clause.wildcard & lunr.Query.wildcard.TRAILING) && (clause.term.slice(-1) != lunr.Query.wildcard)) { + clause.term = "" + clause.term + "*" + } + + if (!('presence' in clause)) { + clause.presence = lunr.Query.presence.OPTIONAL + } + + this.clauses.push(clause) + + return this +} + +/** + * A negated query is one in which every clause has a presence of + * prohibited. These queries require some special processing to return + * the expected results. + * + * @returns boolean + */ +lunr.Query.prototype.isNegated = function () { + for (var i = 0; i < this.clauses.length; i++) { + if (this.clauses[i].presence != lunr.Query.presence.PROHIBITED) { + return false + } + } + + return true +} + +/** + * Adds a term to the current query, under the covers this will create a {@link lunr.Query~Clause} + * to the list of clauses that make up this query. + * + * The term is used as is, i.e. no tokenization will be performed by this method. Instead conversion + * to a token or token-like string should be done before calling this method. + * + * The term will be converted to a string by calling `toString`. Multiple terms can be passed as an + * array, each term in the array will share the same options. + * + * @param {object|object[]} term - The term(s) to add to the query. + * @param {object} [options] - Any additional properties to add to the query clause. 
+ * @returns {lunr.Query} + * @see lunr.Query#clause + * @see lunr.Query~Clause + * @example adding a single term to a query + * query.term("foo") + * @example adding a single term to a query and specifying search fields, term boost and automatic trailing wildcard + * query.term("foo", { + * fields: ["title"], + * boost: 10, + * wildcard: lunr.Query.wildcard.TRAILING + * }) + * @example using lunr.tokenizer to convert a string to tokens before using them as terms + * query.term(lunr.tokenizer("foo bar")) + */ +lunr.Query.prototype.term = function (term, options) { + if (Array.isArray(term)) { + term.forEach(function (t) { this.term(t, lunr.utils.clone(options)) }, this) + return this + } + + var clause = options || {} + clause.term = term.toString() + + this.clause(clause) + + return this +} +lunr.QueryParseError = function (message, start, end) { + this.name = "QueryParseError" + this.message = message + this.start = start + this.end = end +} + +lunr.QueryParseError.prototype = new Error +lunr.QueryLexer = function (str) { + this.lexemes = [] + this.str = str + this.length = str.length + this.pos = 0 + this.start = 0 + this.escapeCharPositions = [] +} + +lunr.QueryLexer.prototype.run = function () { + var state = lunr.QueryLexer.lexText + + while (state) { + state = state(this) + } +} + +lunr.QueryLexer.prototype.sliceString = function () { + var subSlices = [], + sliceStart = this.start, + sliceEnd = this.pos + + for (var i = 0; i < this.escapeCharPositions.length; i++) { + sliceEnd = this.escapeCharPositions[i] + subSlices.push(this.str.slice(sliceStart, sliceEnd)) + sliceStart = sliceEnd + 1 + } + + subSlices.push(this.str.slice(sliceStart, this.pos)) + this.escapeCharPositions.length = 0 + + return subSlices.join('') +} + +lunr.QueryLexer.prototype.emit = function (type) { + this.lexemes.push({ + type: type, + str: this.sliceString(), + start: this.start, + end: this.pos + }) + + this.start = this.pos +} + +lunr.QueryLexer.prototype.escapeCharacter = function () { + this.escapeCharPositions.push(this.pos - 1) + this.pos += 1 +} + +lunr.QueryLexer.prototype.next = function () { + if (this.pos >= this.length) { + return lunr.QueryLexer.EOS + } + + var char = this.str.charAt(this.pos) + this.pos += 1 + return char +} + +lunr.QueryLexer.prototype.width = function () { + return this.pos - this.start +} + +lunr.QueryLexer.prototype.ignore = function () { + if (this.start == this.pos) { + this.pos += 1 + } + + this.start = this.pos +} + +lunr.QueryLexer.prototype.backup = function () { + this.pos -= 1 +} + +lunr.QueryLexer.prototype.acceptDigitRun = function () { + var char, charCode + + do { + char = this.next() + charCode = char.charCodeAt(0) + } while (charCode > 47 && charCode < 58) + + if (char != lunr.QueryLexer.EOS) { + this.backup() + } +} + +lunr.QueryLexer.prototype.more = function () { + return this.pos < this.length +} + +lunr.QueryLexer.EOS = 'EOS' +lunr.QueryLexer.FIELD = 'FIELD' +lunr.QueryLexer.TERM = 'TERM' +lunr.QueryLexer.EDIT_DISTANCE = 'EDIT_DISTANCE' +lunr.QueryLexer.BOOST = 'BOOST' +lunr.QueryLexer.PRESENCE = 'PRESENCE' + +lunr.QueryLexer.lexField = function (lexer) { + lexer.backup() + lexer.emit(lunr.QueryLexer.FIELD) + lexer.ignore() + return lunr.QueryLexer.lexText +} + +lunr.QueryLexer.lexTerm = function (lexer) { + if (lexer.width() > 1) { + lexer.backup() + lexer.emit(lunr.QueryLexer.TERM) + } + + lexer.ignore() + + if (lexer.more()) { + return lunr.QueryLexer.lexText + } +} + +lunr.QueryLexer.lexEditDistance = function (lexer) { + lexer.ignore() + 
lexer.acceptDigitRun() + lexer.emit(lunr.QueryLexer.EDIT_DISTANCE) + return lunr.QueryLexer.lexText +} + +lunr.QueryLexer.lexBoost = function (lexer) { + lexer.ignore() + lexer.acceptDigitRun() + lexer.emit(lunr.QueryLexer.BOOST) + return lunr.QueryLexer.lexText +} + +lunr.QueryLexer.lexEOS = function (lexer) { + if (lexer.width() > 0) { + lexer.emit(lunr.QueryLexer.TERM) + } +} + +// This matches the separator used when tokenising fields +// within a document. These should match otherwise it is +// not possible to search for some tokens within a document. +// +// It is possible for the user to change the separator on the +// tokenizer so it _might_ clash with any other of the special +// characters already used within the search string, e.g. :. +// +// This means that it is possible to change the separator in +// such a way that makes some words unsearchable using a search +// string. +lunr.QueryLexer.termSeparator = lunr.tokenizer.separator + +lunr.QueryLexer.lexText = function (lexer) { + while (true) { + var char = lexer.next() + + if (char == lunr.QueryLexer.EOS) { + return lunr.QueryLexer.lexEOS + } + + // Escape character is '\' + if (char.charCodeAt(0) == 92) { + lexer.escapeCharacter() + continue + } + + if (char == ":") { + return lunr.QueryLexer.lexField + } + + if (char == "~") { + lexer.backup() + if (lexer.width() > 0) { + lexer.emit(lunr.QueryLexer.TERM) + } + return lunr.QueryLexer.lexEditDistance + } + + if (char == "^") { + lexer.backup() + if (lexer.width() > 0) { + lexer.emit(lunr.QueryLexer.TERM) + } + return lunr.QueryLexer.lexBoost + } + + // "+" indicates term presence is required + // checking for length to ensure that only + // leading "+" are considered + if (char == "+" && lexer.width() === 1) { + lexer.emit(lunr.QueryLexer.PRESENCE) + return lunr.QueryLexer.lexText + } + + // "-" indicates term presence is prohibited + // checking for length to ensure that only + // leading "-" are considered + if (char == "-" && lexer.width() === 1) { + lexer.emit(lunr.QueryLexer.PRESENCE) + return lunr.QueryLexer.lexText + } + + if (char.match(lunr.QueryLexer.termSeparator)) { + return lunr.QueryLexer.lexTerm + } + } +} + +lunr.QueryParser = function (str, query) { + this.lexer = new lunr.QueryLexer (str) + this.query = query + this.currentClause = {} + this.lexemeIdx = 0 +} + +lunr.QueryParser.prototype.parse = function () { + this.lexer.run() + this.lexemes = this.lexer.lexemes + + var state = lunr.QueryParser.parseClause + + while (state) { + state = state(this) + } + + return this.query +} + +lunr.QueryParser.prototype.peekLexeme = function () { + return this.lexemes[this.lexemeIdx] +} + +lunr.QueryParser.prototype.consumeLexeme = function () { + var lexeme = this.peekLexeme() + this.lexemeIdx += 1 + return lexeme +} + +lunr.QueryParser.prototype.nextClause = function () { + var completedClause = this.currentClause + this.query.clause(completedClause) + this.currentClause = {} +} + +lunr.QueryParser.parseClause = function (parser) { + var lexeme = parser.peekLexeme() + + if (lexeme == undefined) { + return + } + + switch (lexeme.type) { + case lunr.QueryLexer.PRESENCE: + return lunr.QueryParser.parsePresence + case lunr.QueryLexer.FIELD: + return lunr.QueryParser.parseField + case lunr.QueryLexer.TERM: + return lunr.QueryParser.parseTerm + default: + var errorMessage = "expected either a field or a term, found " + lexeme.type + + if (lexeme.str.length >= 1) { + errorMessage += " with value '" + lexeme.str + "'" + } + + throw new lunr.QueryParseError (errorMessage, 
lexeme.start, lexeme.end) + } +} + +lunr.QueryParser.parsePresence = function (parser) { + var lexeme = parser.consumeLexeme() + + if (lexeme == undefined) { + return + } + + switch (lexeme.str) { + case "-": + parser.currentClause.presence = lunr.Query.presence.PROHIBITED + break + case "+": + parser.currentClause.presence = lunr.Query.presence.REQUIRED + break + default: + var errorMessage = "unrecognised presence operator'" + lexeme.str + "'" + throw new lunr.QueryParseError (errorMessage, lexeme.start, lexeme.end) + } + + var nextLexeme = parser.peekLexeme() + + if (nextLexeme == undefined) { + var errorMessage = "expecting term or field, found nothing" + throw new lunr.QueryParseError (errorMessage, lexeme.start, lexeme.end) + } + + switch (nextLexeme.type) { + case lunr.QueryLexer.FIELD: + return lunr.QueryParser.parseField + case lunr.QueryLexer.TERM: + return lunr.QueryParser.parseTerm + default: + var errorMessage = "expecting term or field, found '" + nextLexeme.type + "'" + throw new lunr.QueryParseError (errorMessage, nextLexeme.start, nextLexeme.end) + } +} + +lunr.QueryParser.parseField = function (parser) { + var lexeme = parser.consumeLexeme() + + if (lexeme == undefined) { + return + } + + if (parser.query.allFields.indexOf(lexeme.str) == -1) { + var possibleFields = parser.query.allFields.map(function (f) { return "'" + f + "'" }).join(', '), + errorMessage = "unrecognised field '" + lexeme.str + "', possible fields: " + possibleFields + + throw new lunr.QueryParseError (errorMessage, lexeme.start, lexeme.end) + } + + parser.currentClause.fields = [lexeme.str] + + var nextLexeme = parser.peekLexeme() + + if (nextLexeme == undefined) { + var errorMessage = "expecting term, found nothing" + throw new lunr.QueryParseError (errorMessage, lexeme.start, lexeme.end) + } + + switch (nextLexeme.type) { + case lunr.QueryLexer.TERM: + return lunr.QueryParser.parseTerm + default: + var errorMessage = "expecting term, found '" + nextLexeme.type + "'" + throw new lunr.QueryParseError (errorMessage, nextLexeme.start, nextLexeme.end) + } +} + +lunr.QueryParser.parseTerm = function (parser) { + var lexeme = parser.consumeLexeme() + + if (lexeme == undefined) { + return + } + + parser.currentClause.term = lexeme.str.toLowerCase() + + if (lexeme.str.indexOf("*") != -1) { + parser.currentClause.usePipeline = false + } + + var nextLexeme = parser.peekLexeme() + + if (nextLexeme == undefined) { + parser.nextClause() + return + } + + switch (nextLexeme.type) { + case lunr.QueryLexer.TERM: + parser.nextClause() + return lunr.QueryParser.parseTerm + case lunr.QueryLexer.FIELD: + parser.nextClause() + return lunr.QueryParser.parseField + case lunr.QueryLexer.EDIT_DISTANCE: + return lunr.QueryParser.parseEditDistance + case lunr.QueryLexer.BOOST: + return lunr.QueryParser.parseBoost + case lunr.QueryLexer.PRESENCE: + parser.nextClause() + return lunr.QueryParser.parsePresence + default: + var errorMessage = "Unexpected lexeme type '" + nextLexeme.type + "'" + throw new lunr.QueryParseError (errorMessage, nextLexeme.start, nextLexeme.end) + } +} + +lunr.QueryParser.parseEditDistance = function (parser) { + var lexeme = parser.consumeLexeme() + + if (lexeme == undefined) { + return + } + + var editDistance = parseInt(lexeme.str, 10) + + if (isNaN(editDistance)) { + var errorMessage = "edit distance must be numeric" + throw new lunr.QueryParseError (errorMessage, lexeme.start, lexeme.end) + } + + parser.currentClause.editDistance = editDistance + + var nextLexeme = parser.peekLexeme() + + if 
(nextLexeme == undefined) { + parser.nextClause() + return + } + + switch (nextLexeme.type) { + case lunr.QueryLexer.TERM: + parser.nextClause() + return lunr.QueryParser.parseTerm + case lunr.QueryLexer.FIELD: + parser.nextClause() + return lunr.QueryParser.parseField + case lunr.QueryLexer.EDIT_DISTANCE: + return lunr.QueryParser.parseEditDistance + case lunr.QueryLexer.BOOST: + return lunr.QueryParser.parseBoost + case lunr.QueryLexer.PRESENCE: + parser.nextClause() + return lunr.QueryParser.parsePresence + default: + var errorMessage = "Unexpected lexeme type '" + nextLexeme.type + "'" + throw new lunr.QueryParseError (errorMessage, nextLexeme.start, nextLexeme.end) + } +} + +lunr.QueryParser.parseBoost = function (parser) { + var lexeme = parser.consumeLexeme() + + if (lexeme == undefined) { + return + } + + var boost = parseInt(lexeme.str, 10) + + if (isNaN(boost)) { + var errorMessage = "boost must be numeric" + throw new lunr.QueryParseError (errorMessage, lexeme.start, lexeme.end) + } + + parser.currentClause.boost = boost + + var nextLexeme = parser.peekLexeme() + + if (nextLexeme == undefined) { + parser.nextClause() + return + } + + switch (nextLexeme.type) { + case lunr.QueryLexer.TERM: + parser.nextClause() + return lunr.QueryParser.parseTerm + case lunr.QueryLexer.FIELD: + parser.nextClause() + return lunr.QueryParser.parseField + case lunr.QueryLexer.EDIT_DISTANCE: + return lunr.QueryParser.parseEditDistance + case lunr.QueryLexer.BOOST: + return lunr.QueryParser.parseBoost + case lunr.QueryLexer.PRESENCE: + parser.nextClause() + return lunr.QueryParser.parsePresence + default: + var errorMessage = "Unexpected lexeme type '" + nextLexeme.type + "'" + throw new lunr.QueryParseError (errorMessage, nextLexeme.start, nextLexeme.end) + } +} + + /** + * export the module via AMD, CommonJS or as a browser global + * Export code from https://github.com/umdjs/umd/blob/master/returnExports.js + */ + ;(function (root, factory) { + if (typeof define === 'function' && define.amd) { + // AMD. Register as an anonymous module. + define(factory) + } else if (typeof exports === 'object') { + /** + * Node. Does not work with strict CommonJS, but + * only CommonJS-like environments that support module.exports, + * like Node. + */ + module.exports = factory() + } else { + // Browser globals (root is window) + root.lunr = factory() + } + }(this, function () { + /** + * Just return a value to define the module export. + * This example returns an object, but the module + * can return a function as the exported value. + */ + return lunr + })) +})(); diff --git a/search/main.js b/search/main.js new file mode 100644 index 00000000..a5e469d7 --- /dev/null +++ b/search/main.js @@ -0,0 +1,109 @@ +function getSearchTermFromLocation() { + var sPageURL = window.location.search.substring(1); + var sURLVariables = sPageURL.split('&'); + for (var i = 0; i < sURLVariables.length; i++) { + var sParameterName = sURLVariables[i].split('='); + if (sParameterName[0] == 'q') { + return decodeURIComponent(sParameterName[1].replace(/\+/g, '%20')); + } + } +} + +function joinUrl (base, path) { + if (path.substring(0, 1) === "/") { + // path starts with `/`. Thus it is absolute. 
+ return path; + } + if (base.substring(base.length-1) === "/") { + // base ends with `/` + return base + path; + } + return base + "/" + path; +} + +function escapeHtml (value) { + return value.replace(/&/g, '&amp;') + .replace(/"/g, '&quot;') + .replace(/</g, '&lt;') + .replace(/>/g, '&gt;'); +} + +function formatResult (location, title, summary) { + return '<article><h3><a href="' + joinUrl(base_url, location) + '">' + escapeHtml(title) + '</a></h3><p>' + escapeHtml(summary) + '</p></article>'; +} + +function displayResults (results) { + var search_results = document.getElementById("mkdocs-search-results"); + while (search_results.firstChild) { + search_results.removeChild(search_results.firstChild); + } + if (results.length > 0){ + for (var i=0; i < results.length; i++){ + var result = results[i]; + var html = formatResult(result.location, result.title, result.summary); + search_results.insertAdjacentHTML('beforeend', html); + } + } else { + var noResultsText = search_results.getAttribute('data-no-results-text'); + if (!noResultsText) { + noResultsText = "No results found"; + } + search_results.insertAdjacentHTML('beforeend', '

<p>' + noResultsText + '</p>
'); + } +} + +function doSearch () { + var query = document.getElementById('mkdocs-search-query').value; + if (query.length > min_search_length) { + if (!window.Worker) { + displayResults(search(query)); + } else { + searchWorker.postMessage({query: query}); + } + } else { + // Clear results for short queries + displayResults([]); + } +} + +function initSearch () { + var search_input = document.getElementById('mkdocs-search-query'); + if (search_input) { + search_input.addEventListener("keyup", doSearch); + } + var term = getSearchTermFromLocation(); + if (term) { + search_input.value = term; + doSearch(); + } +} + +function onWorkerMessage (e) { + if (e.data.allowSearch) { + initSearch(); + } else if (e.data.results) { + var results = e.data.results; + displayResults(results); + } else if (e.data.config) { + min_search_length = e.data.config.min_search_length-1; + } +} + +if (!window.Worker) { + console.log('Web Worker API not supported'); + // load index in main thread + $.getScript(joinUrl(base_url, "search/worker.js")).done(function () { + console.log('Loaded worker'); + init(); + window.postMessage = function (msg) { + onWorkerMessage({data: msg}); + }; + }).fail(function (jqxhr, settings, exception) { + console.error('Could not load worker.js'); + }); +} else { + // Wrap search in a web worker + var searchWorker = new Worker(joinUrl(base_url, "search/worker.js")); + searchWorker.postMessage({init: true}); + searchWorker.onmessage = onWorkerMessage; +} diff --git a/search/search_index.json b/search/search_index.json new file mode 100644 index 00000000..53a5ae53 --- /dev/null +++ b/search/search_index.json @@ -0,0 +1 @@ +{"config":{"indexing":"full","lang":["en"],"min_search_length":3,"prebuild_index":false,"separator":"[\\s\\-]+"},"docs":[{"location":"","text":"On this page: Triply Documentation TriplyDB TriplyETL Triply Documentation \u00b6 What can we help you with? TriplyDB \u00b6 TriplyDB is a state-of-the-art linked database / triple store that is used by organizations of any size: from start-ups to orgs with 10K+ employees. Learn more about how to use TriplyDB TriplyETL \u00b6 Use TriplyETL to quickly connect your data sources to your linked database / triple store. TriplyETL can be extract, transform, enrich, validate, and load linked data. Learn more about how to use TriplyETL Didn't find what you were looking for? Contact us via our form or by e-mailing to info@triply.cc .","title":"Home"},{"location":"#triply-documentation","text":"What can we help you with?","title":"Triply Documentation"},{"location":"#triplydb","text":"TriplyDB is a state-of-the-art linked database / triple store that is used by organizations of any size: from start-ups to orgs with 10K+ employees. Learn more about how to use TriplyDB","title":"TriplyDB"},{"location":"#triplyetl","text":"Use TriplyETL to quickly connect your data sources to your linked database / triple store. TriplyETL can be extract, transform, enrich, validate, and load linked data. Learn more about how to use TriplyETL Didn't find what you were looking for? Contact us via our form or by e-mailing to info@triply.cc .","title":"TriplyETL"},{"location":"generics/Graphql/","text":"On this page: Graphql implementation Schema Object types Fields IDs Naming Renaming Queries Global Object identification Pagination Filtering Simple cases Language filtering Advanced filtering Graphql implementation \u00b6 Some TriplyDB instances expose a GraphQL endpoint. This endpoint uses information from user-provided SHACL shapes for the schema creation. 
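As a rough illustration of how such an endpoint can be called over HTTP, the following JavaScript sketch sends a connection query with fetch. It assumes the standard GraphQL-over-HTTP convention (a POST request whose JSON body carries a query field) and a runtime with a global fetch, such as Node 18+ or a browser; the endpoint URL and the BookConnection query are illustrative placeholders, so substitute the GraphQL endpoint and schema of your own TriplyDB instance.

// Hypothetical endpoint URL: replace with the GraphQL endpoint of your own TriplyDB instance.
const endpoint = "https://api.my-triplydb-instance.example/datasets/my-account/my-dataset/graphql";

// A connection query in the style described later on this page.
const query = `
  {
    BookConnection {
      edges {
        node {
          id
          title
        }
      }
    }
  }`;

fetch(endpoint, {
  method: "POST",
  headers: { "Content-Type": "application/json" },
  body: JSON.stringify({ query })
})
  .then((response) => response.json())
  .then((result) => console.log(JSON.stringify(result, null, 2)))
  .catch((error) => console.error(error));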
The goal of this documentation is to inform users about Triply's implementation of the GraphQL endpoint. For more generic information about GraphQL, you can visit graphql.org or other resources. In order to understand this documentation, you have to be familiar with the SHACL language. Note: in order to avoid confusion we will use the noun object as a synonym for resource and triple object when referring to the third element of a triple. Schema \u00b6 Object types \u00b6 A basic element of the schema is object types, which represents the type of the resources that you can query. type Book { id:ID! title:[XsdString]! } This object type corresponds to the shape below: shp:Book a sh:NodeShape; sh:targetClass sdo:Book; sh:property [ sh:path dc:title; sh:datatype xsd:string ]. Fields \u00b6 Fields in object types, such as title , represent properties of nodes. By default, fields return arrays of values. The only exception is when the property has sh:maxCount: 1 , then the field returns a single value. Thus, for the shape: shp:Book a sh:NodeShape; sh:targetClass sdo:Book; sh:property [ sh:path dc:title; sh:maxCount 1; sh:datatype xsd:string ]. The object type will be: type Book { id:ID! title:XsdString } Additionally, following the best practices , fields can give null results, except for: IDs, which represents the IRI of the resource. Lists, but not their elements Properties that have sh:minCount 1 and sh:maxCount 1 Thus, for this shape: shp:Book a sh:NodeShape; sh:targetClass sdo:Book; sh:property [ sh:path dc:title; sh:maxCount 1; sh:minCount 1; sh:datatype xsd:string ]. The corresponding object type is: type Book { id:ID! title:XsdString! } If the property shape includes an sh:datatype , the field returns values of GraphQL scalar type (see example above). On the other hand, if the property shape has an sh:class pointing to a class that: - is the sh:targetClass of a node shape, the field returns values of the corresponding object type. - is not mentioned as a sh:targetClass in a node shape, then the type of the returned values is ExternalIri . Therefore, the shapes: shp:Book a sh:NodeShape; sh:targetClass sdo:Book; sh:property [ sh:path sdo:author; sh:class sdo:Person ]; [ sh:path sdo:audio; sh:class sdo:AudioObject ]. shp:Person a sh:NodeShape; sh:targetClass sdo:Person; sh:property [ sh:path sdo:name; sh:datatype xsd:string ]. correspond to the below graphql types: type Book { id:ID! author:[Person]! audio:[ExternalIri]! } type Person { id:ID! name:[XsdString]! } IDs \u00b6 The id field is of type ID, which represents the IRI of each resource. This ID is unique. For example: book:Odyssey a sdo:Book; dct:title \"Odyssey\". The id field of this resource would be https://example.org/book/Odyssey . You can read more information on the ID scalar in graphql.org . Also, the use of the id field is mentioned later in the section Object Global Identification . Naming \u00b6 In order to name the GraphQL types in correspondence to shapes, we follow the below conventions: - For object types, we use the sh:targetClass of the node shape. - For object type fields, we use the sh:path of the property shape. More specifically, the name comes from the part of the IRI after the last # or otherwise the last / , converted from kebab-case to camelCase. Notice that if the selected name is illegal or causes a name collision, we'll return an error informing the user about the problem and ignore this type or field. Renaming \u00b6 Shape designers are able use their custom names by using a special property: . 
More specifically, the designer has to add a triple with : - for object types, the class IRI - for fields, the IRI of the property shape as a subject, the above-mentioned predicate and a string literal with the custom name as triple object. If we wanted to rename using the first example of the section, we would do: shp:Book a sh:NodeShape; sh:targetClass sdo:Book; sh:property [ sh:path dc:title; triply:graphqlName \"name\"; # Rename the object type field sh:datatype xsd:string ] sdo:Book triply:graphqlName \"PieceOfArt\". # Rename the object type field. Then the corresponding object type would be: type PieceOfArt { id:ID! name:[XsdString]! } Queries \u00b6 The user can query for objects using their unique ID. Also, they can query for objects of a specific type along with fields, and get nested information. Last, the user can get information by filtering results. Let's see some important concepts. Global Object identification \u00b6 For reasons such as caching, the user should be able to query an object by their unique ID. This is possible using global object identification, using the node(id:ID) query. An example: { node(id: \"https://example.org/book/Odyssey\") { id } } For more information on global object identification, see graphql specification . Pagination \u00b6 A simple query would be: { BookConnection { edges { node { id title } } } } The results would include the IRIs of books together with their titles and would be paginated. In order to paginate through a large number of results, our GraphQL implementation supports cursor-based pagination using connections . For more information, please visit the Relay project's cursor-based connection pagination specification . Filtering \u00b6 When you query for objects, you might want to get back resources based on specific values in certain fields. You can do this by filtering. Simple cases \u00b6 For example, you can query for people with a specific id: { PersonConnection(filter: {id: \"https://example.org/person/Homer\"}) { edges { node { id name } } } } Another query would be to search for a person with a specific name: { PersonConnection(filter: {name: {eq: \"Homer\"}}) { edges { node { id name } } } } Notice that in the second example, there is a new field for filtering called eq . When we want to filter on a field with returns a scalar, meaning that its value is represented by a literal in linked data, we have to use comparison operators: eq , in for equality, and notEq and notIn for inequality. The operators in and notIn are refering to lists. On the other hand, when we are filtering based on IDs - or in linked data terms, based on the IRI - , as in the first example, we don't use comparison operators. The only idiomatic case is the literal with a language tag and rdf:langString as a datatype. This literal is represented as { value: \"example-string\", language: \"en\" } and the corresponding scalar is RdfsLangString . This means that in order to filter using a value of this scalar type, you have to execute the query below: { PersonConnection(filter: {name: {eq: {value: \"Odysseus\", language: \"en\"}}}) { edges { node { id name } } } } Language filtering \u00b6 Additionally, there is support for filtering results based on the language tag. An example is: Linked data: person:Odysseus a sdo:Person; sdo:name \"Odysseus\"@en, \"\u039f\u03b4\u03c5\u03c3\u03c3\u03b5\u03cd\u03c2\"@gr. shp:Person a sh:NodeShape; sh:targetClass sdo:Person; sh:property [ sh:path sdo:name; sh:datatype rdf:langString ]. 
GraphQL query: { PersonConnection { edges { node { id name(language:\"gr\") } } } } Results: { \"data\": { \"PersonConnection\": { \"edges\": [ { \"node\": { \"id\": \"https://example.org/person/Odysseus\", \"name\": [ { \"value\": \"\u039f\u03b4\u03c5\u03c3\u03c3\u03b5\u03cd\u03c2\", \"language\": \"gr\" } ] } } ] } } } Our implementation supports using the HTTP Accept-Language syntax, for filtering based on a language-tag. For example, GraphQL query: { PersonConnection { edges { node { id name(language:\"gr, en;q=.5\") } } } } Results: { \"data\": { \"PersonConnection\": { \"edges\": [ { \"node\": { \"id\": \"https://example.org/person/Odysseus\", \"name\": [ { \"value\": \"\u039f\u03b4\u03c5\u03c3\u03c3\u03b5\u03cd\u03c2\", \"language\": \"gr\" }, { \"value\": \"Odysseus\", \"language\": \"en\" }, ] } } ] } } } If the writer of the shapes includes the sh:uniqueLang constraint, then the result returned will be a single value, instead of an array. Thus, the example becomes: Linked data: person:Odysseus a sdo:Person; sdo:name \"Odysseus\"@en, \"\u039f\u03b4\u03c5\u03c3\u03c3\u03b5\u03cd\u03c2\"@gr. shp:Person a sh:NodeShape; sh:targetClass sdo:Person; sh:property [ sh:path sdo:name; sh:uniqueLang true; sh:datatype rdf:langString ]. GraphQL query: { PersonConnection { edges { node { id name(language:\"gr, en;q=.5\") } } } } Results: { \"data\": { \"PersonConnection\": { \"edges\": [ { \"node\": { \"id\": \"https://example.org/person/Odysseus\", \"name\": { \"value\": \"\u039f\u03b4\u03c5\u03c3\u03c3\u03b5\u03cd\u03c2\", \"language\": \"gr\" } } } ] } } } Advanced filtering \u00b6 Furthermore, there is possibility for nested filtering: { BookConnection( filter: {author: {name: {eq: \"Homer\"}}} ) { edges { node { id } } } } and for combination of filters: { BookConnection( filter: {author: {name: {eq: \"Homer\"}}, name: {eq: \"Odyssey\"}} ) { edges { node { id } } } } Note: The combination of filters is executed in an 'and' logic.","title":"GraphQL implementation information"},{"location":"generics/Graphql/#graphql-implementation","text":"Some TriplyDB instances expose a GraphQL endpoint. This endpoint uses information from user-provided SHACL shapes for the schema creation. The goal of this documentation is to inform users about Triply's implementation of the GraphQL endpoint. For more generic information about GraphQL, you can visit graphql.org or other resources. In order to understand this documentation, you have to be familiar with the SHACL language. Note: in order to avoid confusion we will use the noun object as a synonym for resource and triple object when referring to the third element of a triple.","title":"Graphql implementation"},{"location":"generics/Graphql/#schema","text":"","title":"Schema"},{"location":"generics/Graphql/#object-types","text":"A basic element of the schema is object types, which represents the type of the resources that you can query. type Book { id:ID! title:[XsdString]! } This object type corresponds to the shape below: shp:Book a sh:NodeShape; sh:targetClass sdo:Book; sh:property [ sh:path dc:title; sh:datatype xsd:string ].","title":"Object types"},{"location":"generics/Graphql/#fields","text":"Fields in object types, such as title , represent properties of nodes. By default, fields return arrays of values. The only exception is when the property has sh:maxCount: 1 , then the field returns a single value. Thus, for the shape: shp:Book a sh:NodeShape; sh:targetClass sdo:Book; sh:property [ sh:path dc:title; sh:maxCount 1; sh:datatype xsd:string ]. 
The object type will be: type Book { id:ID! title:XsdString } Additionally, following the best practices , fields can give null results, except for: IDs, which represents the IRI of the resource. Lists, but not their elements Properties that have sh:minCount 1 and sh:maxCount 1 Thus, for this shape: shp:Book a sh:NodeShape; sh:targetClass sdo:Book; sh:property [ sh:path dc:title; sh:maxCount 1; sh:minCount 1; sh:datatype xsd:string ]. The corresponding object type is: type Book { id:ID! title:XsdString! } If the property shape includes an sh:datatype , the field returns values of GraphQL scalar type (see example above). On the other hand, if the property shape has an sh:class pointing to a class that: - is the sh:targetClass of a node shape, the field returns values of the corresponding object type. - is not mentioned as a sh:targetClass in a node shape, then the type of the returned values is ExternalIri . Therefore, the shapes: shp:Book a sh:NodeShape; sh:targetClass sdo:Book; sh:property [ sh:path sdo:author; sh:class sdo:Person ]; [ sh:path sdo:audio; sh:class sdo:AudioObject ]. shp:Person a sh:NodeShape; sh:targetClass sdo:Person; sh:property [ sh:path sdo:name; sh:datatype xsd:string ]. correspond to the below graphql types: type Book { id:ID! author:[Person]! audio:[ExternalIri]! } type Person { id:ID! name:[XsdString]! }","title":"Fields"},{"location":"generics/Graphql/#ids","text":"The id field is of type ID, which represents the IRI of each resource. This ID is unique. For example: book:Odyssey a sdo:Book; dct:title \"Odyssey\". The id field of this resource would be https://example.org/book/Odyssey . You can read more information on the ID scalar in graphql.org . Also, the use of the id field is mentioned later in the section Object Global Identification .","title":"IDs"},{"location":"generics/Graphql/#naming","text":"In order to name the GraphQL types in correspondence to shapes, we follow the below conventions: - For object types, we use the sh:targetClass of the node shape. - For object type fields, we use the sh:path of the property shape. More specifically, the name comes from the part of the IRI after the last # or otherwise the last / , converted from kebab-case to camelCase. Notice that if the selected name is illegal or causes a name collision, we'll return an error informing the user about the problem and ignore this type or field.","title":"Naming"},{"location":"generics/Graphql/#renaming","text":"Shape designers are able use their custom names by using a special property: . More specifically, the designer has to add a triple with : - for object types, the class IRI - for fields, the IRI of the property shape as a subject, the above-mentioned predicate and a string literal with the custom name as triple object. If we wanted to rename using the first example of the section, we would do: shp:Book a sh:NodeShape; sh:targetClass sdo:Book; sh:property [ sh:path dc:title; triply:graphqlName \"name\"; # Rename the object type field sh:datatype xsd:string ] sdo:Book triply:graphqlName \"PieceOfArt\". # Rename the object type field. Then the corresponding object type would be: type PieceOfArt { id:ID! name:[XsdString]! }","title":"Renaming"},{"location":"generics/Graphql/#queries","text":"The user can query for objects using their unique ID. Also, they can query for objects of a specific type along with fields, and get nested information. Last, the user can get information by filtering results. 
Let's see some important concepts.","title":"Queries"},{"location":"generics/Graphql/#global-object-identification","text":"For reasons such as caching, the user should be able to query an object by their unique ID. This is possible using global object identification, using the node(id:ID) query. An example: { node(id: \"https://example.org/book/Odyssey\") { id } } For more information on global object identification, see graphql specification .","title":"Global Object identification"},{"location":"generics/Graphql/#pagination","text":"A simple query would be: { BookConnection { edges { node { id title } } } } The results would include the IRIs of books together with their titles and would be paginated. In order to paginate through a large number of results, our GraphQL implementation supports cursor-based pagination using connections . For more information, please visit the Relay project's cursor-based connection pagination specification .","title":"Pagination"},{"location":"generics/Graphql/#filtering","text":"When you query for objects, you might want to get back resources based on specific values in certain fields. You can do this by filtering.","title":"Filtering"},{"location":"generics/Graphql/#simple-cases","text":"For example, you can query for people with a specific id: { PersonConnection(filter: {id: \"https://example.org/person/Homer\"}) { edges { node { id name } } } } Another query would be to search for a person with a specific name: { PersonConnection(filter: {name: {eq: \"Homer\"}}) { edges { node { id name } } } } Notice that in the second example, there is a new field for filtering called eq . When we want to filter on a field with returns a scalar, meaning that its value is represented by a literal in linked data, we have to use comparison operators: eq , in for equality, and notEq and notIn for inequality. The operators in and notIn are refering to lists. On the other hand, when we are filtering based on IDs - or in linked data terms, based on the IRI - , as in the first example, we don't use comparison operators. The only idiomatic case is the literal with a language tag and rdf:langString as a datatype. This literal is represented as { value: \"example-string\", language: \"en\" } and the corresponding scalar is RdfsLangString . This means that in order to filter using a value of this scalar type, you have to execute the query below: { PersonConnection(filter: {name: {eq: {value: \"Odysseus\", language: \"en\"}}}) { edges { node { id name } } } }","title":"Simple cases"},{"location":"generics/Graphql/#language-filtering","text":"Additionally, there is support for filtering results based on the language tag. An example is: Linked data: person:Odysseus a sdo:Person; sdo:name \"Odysseus\"@en, \"\u039f\u03b4\u03c5\u03c3\u03c3\u03b5\u03cd\u03c2\"@gr. shp:Person a sh:NodeShape; sh:targetClass sdo:Person; sh:property [ sh:path sdo:name; sh:datatype rdf:langString ]. GraphQL query: { PersonConnection { edges { node { id name(language:\"gr\") } } } } Results: { \"data\": { \"PersonConnection\": { \"edges\": [ { \"node\": { \"id\": \"https://example.org/person/Odysseus\", \"name\": [ { \"value\": \"\u039f\u03b4\u03c5\u03c3\u03c3\u03b5\u03cd\u03c2\", \"language\": \"gr\" } ] } } ] } } } Our implementation supports using the HTTP Accept-Language syntax, for filtering based on a language-tag. 
For example, GraphQL query: { PersonConnection { edges { node { id name(language:\"gr, en;q=.5\") } } } } Results: { \"data\": { \"PersonConnection\": { \"edges\": [ { \"node\": { \"id\": \"https://example.org/person/Odysseus\", \"name\": [ { \"value\": \"\u039f\u03b4\u03c5\u03c3\u03c3\u03b5\u03cd\u03c2\", \"language\": \"gr\" }, { \"value\": \"Odysseus\", \"language\": \"en\" }, ] } } ] } } } If the writer of the shapes includes the sh:uniqueLang constraint, then the result returned will be a single value, instead of an array. Thus, the example becomes: Linked data: person:Odysseus a sdo:Person; sdo:name \"Odysseus\"@en, \"\u039f\u03b4\u03c5\u03c3\u03c3\u03b5\u03cd\u03c2\"@gr. shp:Person a sh:NodeShape; sh:targetClass sdo:Person; sh:property [ sh:path sdo:name; sh:uniqueLang true; sh:datatype rdf:langString ]. GraphQL query: { PersonConnection { edges { node { id name(language:\"gr, en;q=.5\") } } } } Results: { \"data\": { \"PersonConnection\": { \"edges\": [ { \"node\": { \"id\": \"https://example.org/person/Odysseus\", \"name\": { \"value\": \"\u039f\u03b4\u03c5\u03c3\u03c3\u03b5\u03cd\u03c2\", \"language\": \"gr\" } } } ] } } }","title":"Language filtering"},{"location":"generics/Graphql/#advanced-filtering","text":"Furthermore, there is possibility for nested filtering: { BookConnection( filter: {author: {name: {eq: \"Homer\"}}} ) { edges { node { id } } } } and for combination of filters: { BookConnection( filter: {author: {name: {eq: \"Homer\"}}, name: {eq: \"Odyssey\"}} ) { edges { node { id } } } } Note: The combination of filters is executed in an 'and' logic.","title":"Advanced filtering"},{"location":"generics/JSON-LD-frames/","text":"On this page: JSON-LD Framing Why JSON-LD Framing? The SPARQL Query The Frame Using SPARQL to create a frame JSON-LD Framing \u00b6 Why JSON-LD Framing? \u00b6 SPARQL Construct and SPARQL Describe queries can return results in the JSON-LD format. Here is an example: [ { \"@id\": \"john\", \"livesIn\": { \"@id\": \"amsterdam\" } }, { \"@id\": \"jane\", \"livesIn\": { \"@id\": \"berlin\" } }, { \"@id\": \"tim\", \"livesIn\": { \"@id\": \"berlin\" } } ] JSON-LD is one of the serialization formats for RDF, and encodes a graph structure. For example, the JSON-LD snippet above encodes the following graph: graph TB Tim -- livesIn --> Berlin John -- livesIn --> Amsterdam Jane -- livesIn --> Berlin The triples in a graphs do not have any specific order. In our graph picture, the triple about Tim is mentioned first, but this is arbitrary. A graph is a set of triples, so there is no 'first' or 'last' triple. Similarly, there is no 'primary' or 'secondary' element in a graph structure either. In our graph picture, persons occur on the left hand-side and cities occur on the right hand-side. In fact, the same information can be expressed with the following graph: Most RESTful APIs return data with a specific, often tree-shaped structure. For example: { \"amsterdam\": { \"inhabitants\": [ \"john\" ] }, \"berlin\": { \"inhabitants\": [ \"jane\", \"tim\" ] } } JSON-LD Framing is a standard that is used to assign additional structure to JSON-LD. With JSON-LD Framing, we can configure the extra structure that is needed to create RESTful APIs over SPARQL queries. JSON-LD Framing are a deterministic translation from a graph, which has an unordered set of triples where no node is \"first\" or \"special\", into a tree, which has ordered branches and exactly one \"root\" node. 
In other words, JSON-LD framing allows one to force a specific tree layout to a JSON-LD document. This makes it possible to translate SPARQL queries to REST-APIs. The TriplyDB API for saved queries has been equipped with a JSON-LD profiler which can apply a JSON-LD profile to a JSON-LD result, transforming the plain JSON-LD to framed JSON. To do this you need two things. A SPARQL construct query and a JSON-LD frame. When you have both of these, you can retrieve plain JSON from a SPARQL query. The cURL command when both the SPARQL query and JSON-LD frame are available is: curl -X POST [SAVED-QUERY-URL] \\ -H 'Accept: application/ld+json;profile=http://www.w3.org/ns/json-ld#framed' \\ -H 'Authorization: Bearer [YOUR_TOKEN]' \\ -H 'Content-type: application/json' \\ -d '[YOUR_FRAME]' When sending a curl request, a few things are important. First, the request needs to be a POST request. Only a POST request can accept a frame as a body. The Accept header needs to be set to a specific value. The Accept header needs to have both the expected returned content-type and the JSON-LD profile, e.g. application/ld+json;profile=http://www.w3.org/ns/json-ld#framed . When querying an internal or private query you need to add an authorization token. Finally, it is important to set the Content-type . It refers to the content-type of the input body and needs to be application/json , as the frame is of type application/json . The SPARQL Query \u00b6 Let's start with the SPARQL query. A JSON-LD frame query needs a SPARQL Construct query to create an RDF graph that is self contained and populated with relevant vocabulary and data. The graph in JSON-LD is used as input for the RESTful API call. The SPARQL Construct query can be designed with API variables. Do note that API variables with OPTIONAL s can sometimes behave a bit different than regular API variables. This is due to how SPARQL interprets OPTIONAL s. If an API variable is used in an OPTIONAL , the query will return false positives, as the OPTIONAL does not filter out results matching the API-variable. Also note that the use of UNION s can have unexpected effects on the SPARQL query. A union could split up the result set of the SPARQL query. Meaning that the SPARQL engine first exhausts the top part of the UNION and then starts with the second part of the UNION . This means that the first part of the result set can be disconnected from the second part. If the limit is set too small the result set is separated in two different JSON-LD documents. This could result in missing data in the response. Finally, please note that it can happen that you set a pageSize of 10 but the response contains less than 10 results, while the next page is not empty. This is possible as the result set of the WHERE clause is limited with a limit and not the Construct clause. This means that two rows of the resulting WHERE clause are condensed into a single result in the Construct clause. Thus the response of the API can differ from the pageSize . The result is a set of triples according to the query. Saving the SPARQL query will resolve in a saved query. The saved query has an API URL that we can now use in our cURL command. The URL most of the time starts with api and ends with run . The saved query url of an example query is: https://api.triplydb.com/queries/JD/JSON-LD-frame/run You could use API variables with a ? e.g. ?[queryVariable]=[value] The Frame \u00b6 The SPARQL query is not enough to provide the RDF data in a JSON serialization format. 
It requires additional syntactic conformities that cannot be defined in a SPARQL query. Thus the SPARQL query that was created needs a frame to restructure JSON-LD objects into JSON. The JSON-LD 1.1 standard allows for restructuring JSON-LD objects with a frame to JSON. A JSON-LD frame consists out of 2 parts. The @context of the response, and the structure of the response. The complete specification on JSON-LD frames can be found online The @context is the translation of the linked data to the JSON naming. In the @context all the IRIs that occur in the JSON-LD response are documented, with key-value pairs, where the key corresponds to a name the IRI will take in the REST-API response and the value corresponds to the IRI in the JSON-LD response. Most of the time the key-value pairs are one-to-one relations, where one key is mapped to a single string. Sometimes the value is an object. The object contains at least the @id , which is the IRI in the JSON-LD response. The object can also contain other modifiers, that change the REST-API response. Examples are, @type to define the datatype of the object value, or @container to define the container where the value in the REST-API response is stored in. The context can also hold references to vocabularies or prefixes. The second part of the JSON-LD frame is the structure of the data. The structure defines how the REST-API response will look like. Most of the time the structure starts with @type to denote the type that the root node should have. Setting the @type is the most straightforward way of selecting your root node. The structure is built outward from the root node. You can define a leaf node in the structure by adding an opening and closing bracket, as shown in the example. To define a nested node you first need to define the key that is a object property in the JSON-LD response that points to another IRI. Then from that IRI the node is created filling in the properties of that node. { \"@context\": { \"addresses\": \"ex:address\", \"Address\": \"ex:Address\", \"Object\": \"ex:Object\", \"street\": \"ex:street\", \"number\": { \"@id\": \"ex:number\", \"@type\": \"xsd:integer\" }, \"labels\": { \"@id\": \"ex:label\", \"@container\": \"@set\" }, \"ex\": \"https://triply.cc/example/\", \"xsd\": \"http://www.w3.org/2001/XMLSchema#\" }, \"@type\": \"Object\", \"labels\": {}, \"addresses\": { \"street\": {}, \"number\": {} } } The JSON-LD frame together with the SPARQL query will now result in a REST-API result: curl -X POST https://api.triplydb.com/queries/JD/JSON-LD-frame/run \\ -H 'Accept: application/ld+json;profile=http://www.w3.org/ns/json-ld#framed' \\ -H 'Content-type: application/json' \\ -d '{ \"@context\": { \"addresses\": \"ex:address\", \"Address\": \"ex:Address\", \"Object\": \"ex:Object\", \"street\": \"ex:street\", \"number\": { \"@id\": \"ex:number\", \"@type\": \"xsd:integer\" }, \"labels\": { \"@id\": \"ex:label\", \"@container\": \"@set\" }, \"ex\": \"https://triply.cc/example/\", \"xsd\": \"http://www.w3.org/2001/XMLSchema#\" }, \"@type\": \"Object\", \"labels\": {}, \"addresses\": { \"street\": {}, \"number\": {} } }' The JSON-LD frame turns SPARQL results for the query in step 1 into a format that is accepted as plain RESTful API request. Using SPARQL to create a frame \u00b6 Another way to create a frame is by using the SPARQL editor in TriplyDB. You can access the JSON-LD editor by clicking the three dots next to the SPARQL editor, and then selecting \"To JSON-LD frame editor\". 
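For completeness, the cURL call shown above can also be issued from JavaScript. The sketch below mirrors that request with fetch; it assumes a runtime with a global fetch (Node 18+ or a browser) and omits the Authorization: Bearer header that internal or private queries additionally require.

// The JSON-LD frame from the example above, expressed as a JavaScript object.
const frame = {
  "@context": {
    "addresses": "ex:address",
    "Address": "ex:Address",
    "Object": "ex:Object",
    "street": "ex:street",
    "number": { "@id": "ex:number", "@type": "xsd:integer" },
    "labels": { "@id": "ex:label", "@container": "@set" },
    "ex": "https://triply.cc/example/",
    "xsd": "http://www.w3.org/2001/XMLSchema#"
  },
  "@type": "Object",
  "labels": {},
  "addresses": { "street": {}, "number": {} }
};

// POST the frame to the saved-query API URL and ask for framed JSON-LD back.
fetch("https://api.triplydb.com/queries/JD/JSON-LD-frame/run", {
  method: "POST",
  headers: {
    "Accept": "application/ld+json;profile=http://www.w3.org/ns/json-ld#framed",
    "Content-Type": "application/json"
  },
  body: JSON.stringify(frame)
})
  .then((response) => response.json())
  .then((result) => console.log(JSON.stringify(result, null, 2)))
  .catch((error) => console.error(error));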
Afterwards, the JSON script from above should be added to the JSON-LD Frame editor. Running the script results in the following REST-API result: This can also be accessed by the generated API Link above the SPARQL editor. Copying and pasting the generated link will direct you to a page where you can view the script:","title":"JSON-LD Framing"},{"location":"generics/JSON-LD-frames/#json-ld-framing","text":"","title":"JSON-LD Framing"},{"location":"generics/JSON-LD-frames/#why-json-ld-framing","text":"SPARQL Construct and SPARQL Describe queries can return results in the JSON-LD format. Here is an example: [ { \"@id\": \"john\", \"livesIn\": { \"@id\": \"amsterdam\" } }, { \"@id\": \"jane\", \"livesIn\": { \"@id\": \"berlin\" } }, { \"@id\": \"tim\", \"livesIn\": { \"@id\": \"berlin\" } } ] JSON-LD is one of the serialization formats for RDF, and encodes a graph structure. For example, the JSON-LD snippet above encodes the following graph: graph TB Tim -- livesIn --> Berlin John -- livesIn --> Amsterdam Jane -- livesIn --> Berlin The triples in a graphs do not have any specific order. In our graph picture, the triple about Tim is mentioned first, but this is arbitrary. A graph is a set of triples, so there is no 'first' or 'last' triple. Similarly, there is no 'primary' or 'secondary' element in a graph structure either. In our graph picture, persons occur on the left hand-side and cities occur on the right hand-side. In fact, the same information can be expressed with the following graph: Most RESTful APIs return data with a specific, often tree-shaped structure. For example: { \"amsterdam\": { \"inhabitants\": [ \"john\" ] }, \"berlin\": { \"inhabitants\": [ \"jane\", \"tim\" ] } } JSON-LD Framing is a standard that is used to assign additional structure to JSON-LD. With JSON-LD Framing, we can configure the extra structure that is needed to create RESTful APIs over SPARQL queries. JSON-LD Framing are a deterministic translation from a graph, which has an unordered set of triples where no node is \"first\" or \"special\", into a tree, which has ordered branches and exactly one \"root\" node. In other words, JSON-LD framing allows one to force a specific tree layout to a JSON-LD document. This makes it possible to translate SPARQL queries to REST-APIs. The TriplyDB API for saved queries has been equipped with a JSON-LD profiler which can apply a JSON-LD profile to a JSON-LD result, transforming the plain JSON-LD to framed JSON. To do this you need two things. A SPARQL construct query and a JSON-LD frame. When you have both of these, you can retrieve plain JSON from a SPARQL query. The cURL command when both the SPARQL query and JSON-LD frame are available is: curl -X POST [SAVED-QUERY-URL] \\ -H 'Accept: application/ld+json;profile=http://www.w3.org/ns/json-ld#framed' \\ -H 'Authorization: Bearer [YOUR_TOKEN]' \\ -H 'Content-type: application/json' \\ -d '[YOUR_FRAME]' When sending a curl request, a few things are important. First, the request needs to be a POST request. Only a POST request can accept a frame as a body. The Accept header needs to be set to a specific value. The Accept header needs to have both the expected returned content-type and the JSON-LD profile, e.g. application/ld+json;profile=http://www.w3.org/ns/json-ld#framed . When querying an internal or private query you need to add an authorization token. Finally, it is important to set the Content-type . 
It refers to the content-type of the input body and needs to be application/json , as the frame is of type application/json .","title":"Why JSON-LD Framing?"},{"location":"generics/JSON-LD-frames/#the-sparql-query","text":"Let's start with the SPARQL query. A JSON-LD frame query needs a SPARQL Construct query to create an RDF graph that is self contained and populated with relevant vocabulary and data. The graph in JSON-LD is used as input for the RESTful API call. The SPARQL Construct query can be designed with API variables. Do note that API variables with OPTIONAL s can sometimes behave a bit different than regular API variables. This is due to how SPARQL interprets OPTIONAL s. If an API variable is used in an OPTIONAL , the query will return false positives, as the OPTIONAL does not filter out results matching the API-variable. Also note that the use of UNION s can have unexpected effects on the SPARQL query. A union could split up the result set of the SPARQL query. Meaning that the SPARQL engine first exhausts the top part of the UNION and then starts with the second part of the UNION . This means that the first part of the result set can be disconnected from the second part. If the limit is set too small the result set is separated in two different JSON-LD documents. This could result in missing data in the response. Finally, please note that it can happen that you set a pageSize of 10 but the response contains less than 10 results, while the next page is not empty. This is possible as the result set of the WHERE clause is limited with a limit and not the Construct clause. This means that two rows of the resulting WHERE clause are condensed into a single result in the Construct clause. Thus the response of the API can differ from the pageSize . The result is a set of triples according to the query. Saving the SPARQL query will resolve in a saved query. The saved query has an API URL that we can now use in our cURL command. The URL most of the time starts with api and ends with run . The saved query url of an example query is: https://api.triplydb.com/queries/JD/JSON-LD-frame/run You could use API variables with a ? e.g. ?[queryVariable]=[value]","title":"The SPARQL Query"},{"location":"generics/JSON-LD-frames/#the-frame","text":"The SPARQL query is not enough to provide the RDF data in a JSON serialization format. It requires additional syntactic conformities that cannot be defined in a SPARQL query. Thus the SPARQL query that was created needs a frame to restructure JSON-LD objects into JSON. The JSON-LD 1.1 standard allows for restructuring JSON-LD objects with a frame to JSON. A JSON-LD frame consists out of 2 parts. The @context of the response, and the structure of the response. The complete specification on JSON-LD frames can be found online The @context is the translation of the linked data to the JSON naming. In the @context all the IRIs that occur in the JSON-LD response are documented, with key-value pairs, where the key corresponds to a name the IRI will take in the REST-API response and the value corresponds to the IRI in the JSON-LD response. Most of the time the key-value pairs are one-to-one relations, where one key is mapped to a single string. Sometimes the value is an object. The object contains at least the @id , which is the IRI in the JSON-LD response. The object can also contain other modifiers, that change the REST-API response. 
Examples are, @type to define the datatype of the object value, or @container to define the container where the value in the REST-API response is stored in. The context can also hold references to vocabularies or prefixes. The second part of the JSON-LD frame is the structure of the data. The structure defines how the REST-API response will look like. Most of the time the structure starts with @type to denote the type that the root node should have. Setting the @type is the most straightforward way of selecting your root node. The structure is built outward from the root node. You can define a leaf node in the structure by adding an opening and closing bracket, as shown in the example. To define a nested node you first need to define the key that is a object property in the JSON-LD response that points to another IRI. Then from that IRI the node is created filling in the properties of that node. { \"@context\": { \"addresses\": \"ex:address\", \"Address\": \"ex:Address\", \"Object\": \"ex:Object\", \"street\": \"ex:street\", \"number\": { \"@id\": \"ex:number\", \"@type\": \"xsd:integer\" }, \"labels\": { \"@id\": \"ex:label\", \"@container\": \"@set\" }, \"ex\": \"https://triply.cc/example/\", \"xsd\": \"http://www.w3.org/2001/XMLSchema#\" }, \"@type\": \"Object\", \"labels\": {}, \"addresses\": { \"street\": {}, \"number\": {} } } The JSON-LD frame together with the SPARQL query will now result in a REST-API result: curl -X POST https://api.triplydb.com/queries/JD/JSON-LD-frame/run \\ -H 'Accept: application/ld+json;profile=http://www.w3.org/ns/json-ld#framed' \\ -H 'Content-type: application/json' \\ -d '{ \"@context\": { \"addresses\": \"ex:address\", \"Address\": \"ex:Address\", \"Object\": \"ex:Object\", \"street\": \"ex:street\", \"number\": { \"@id\": \"ex:number\", \"@type\": \"xsd:integer\" }, \"labels\": { \"@id\": \"ex:label\", \"@container\": \"@set\" }, \"ex\": \"https://triply.cc/example/\", \"xsd\": \"http://www.w3.org/2001/XMLSchema#\" }, \"@type\": \"Object\", \"labels\": {}, \"addresses\": { \"street\": {}, \"number\": {} } }' The JSON-LD frame turns SPARQL results for the query in step 1 into a format that is accepted as plain RESTful API request.","title":"The Frame"},{"location":"generics/JSON-LD-frames/#using-sparql-to-create-a-frame","text":"Another way to create a frame is by using the SPARQL editor in TriplyDB. You can access the JSON-LD editor by clicking the three dots next to the SPARQL editor, and then selecting \"To JSON-LD frame editor\". Afterwards, the JSON script from above should be added to the JSON-LD Frame editor. Running the script results in the following REST-API result: This can also be accessed by the generated API Link above the SPARQL editor. Copying and pasting the generated link will direct you to a page where you can view the script:","title":"Using SPARQL to create a frame"},{"location":"generics/api-token/","text":"On this page: API Token API Token \u00b6 Applications (see TriplyDB.js ) and pipelines (see TriplyETL ) often require access rights to interact with TriplyDB instances. Specifically, reading non-public data and writing any (public or non-public) data requires setting an API token. The token ensures that only users that are specifically authorized for certain datasets are able to access and/or modify those datasets. 
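As a minimal sketch of what this looks like from an application, the snippet below follows the TriplyDB.js pattern used elsewhere in this documentation; the account and query names are placeholders, and the token is assumed to be available in the TRIPLYDB_TOKEN environment variable.

import Client from '@triply/triplydb'

async function run() {
  // The token is read from an environment variable rather than hard-coded,
  // in line with the security advice for API tokens.
  const client = Client.get({token: process.env.TRIPLYDB_TOKEN})
  // With the token set, queries and datasets that this user is authorized
  // for (including internal and private ones) become accessible.
  const account = await client.getAccount('account-name')
  const query = await account.getQuery('name-of-some-query')
  const results = await query.results().bindings().toArray()
  console.log(results.length)
}
run()

Creating such a token is done once, in the TriplyDB user interface.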
The following steps must be performed in order to create an API token: Log into the web GUI of the TriplyDB server where you have an account and for which you want to obtain special access rights in your application or pipeline. Many organizations use their own TriplyDB server. If your organization does not yet have a TriplyDB server, you can also create a free account over at TriplyDB.com . Go to your user settings page. This page is reached by clicking on the user menu in the top-right corner and choosing \u201cUser settings\u201d. Go to the \u201cAPI tokens\u201d tab. Click on \u201cCreate token\u201d. Enter a name that describes the purpose of the token. This can be the name of the application or pipeline for which the API token will be used. You can use the name to manage the token later. For example, you can remove tokens for applications that are no longer used later on. It is good practice to create different API tokens for different applications. Choose the permission level that is sufficient for what you want to do with the API token. Notice that \u201cManagement access\u201d is often not needed. \u201cRead access\u201d is sufficient for read-only applications. \u201cWrite access\u201d is sufficient for most pipelines and applications that require write access. Management access: if your application must create or change organization accounts in the TriplyDB server. Write access: if your application must write (meta)data in the TriplyDB server. Read access: if your application must read public and/or private data from the TriplyDB server. Click the \u201cCreate\u201d button to create your token. The token (a long sequence of characters) will now appear in a dialog. For security reasons, the token will only be shown once. You can copy the token over to the application where you want to use it.","title":"API Token"},{"location":"generics/api-token/#api-token","text":"Applications (see TriplyDB.js ) and pipelines (see TriplyETL ) often require access rights to interact with TriplyDB instances. Specifically, reading non-public data and writing any (public or non-public) data requires setting an API token. The token ensures that only users that are specifically authorized for certain datasets are able to access and/or modify those datasets. The following steps must be performed in order to create an API token: Log into the web GUI of the TriplyDB server where you have an account and for which you want to obtain special access rights in your application or pipeline. Many organizations use their own TriplyDB server. If your organization does not yet have a TriplyDB server, you can also create a free account over at TriplyDB.com . Go to your user settings page. This page is reached by clicking on the user menu in the top-right corner and choosing \u201cUser settings\u201d. Go to the \u201cAPI tokens\u201d tab. Click on \u201cCreate token\u201d. Enter a name that describes the purpose of the token. This can be the name of the application or pipeline for which the API token will be used. You can use the name to manage the token later. For example, you can remove tokens for applications that are no longer used later on. It is good practice to create different API tokens for different applications. Choose the permission level that is sufficient for what you want to do with the API token. Notice that \u201cManagement access\u201d is often not needed. \u201cRead access\u201d is sufficient for read-only applications. 
\u201cWrite access\u201d is sufficient for most pipelines and applications that require write access. Management access: if your application must create or change organization accounts in the TriplyDB server. Write access: if your application must write (meta)data in the TriplyDB server. Read access: if your application must read public and/or private data from the TriplyDB server. Click the \u201cCreate\u201d button to create your token. The token (a long sequence of characters) will now appear in a dialog. For security reasons, the token will only be shown once. You can copy the token over to the application where you want to use it.","title":"API Token"},{"location":"generics/sparql-pagination/","text":"On this page: SPARQL Pagination Pagination with the saved query API Pagination with TriplyDB.js SPARQL Pagination \u00b6 This page explains how to retrieve all results from a SPARQL query using pagination. Often SPARQL queries can return more than 10.000 results, but due to limitations the result set will only consist out of the first 10.000 results. To retrieve more than 10.000 results you can use pagination. TriplyDB supports two methods to retrieve all results from a SPARQL query. Pagination with the saved query API or Pagination with TriplyDB.js. Pagination with the saved query API \u00b6 Each TriplyDB instance has a fully RESTful API. The TriplyDB RESTful API is extended for saved SPARQL queries. The API for saved queries is extended with two arguments that the query is able to process paginated result sets. The arguments are \u2018page\u2019 and \u2018pageSize\u2019. An example of a paginated saved SPARQL query request would look like: https://api.triplydb.com/queries/academy/pokemon-color/run?page=3&pageSize=100 The example request argument \u2018page\u2019 corresponds to the requested page. In the example request this would correspond to the third page of paginated SPARQL query, according to the \u2018pageSize\u2019. There is no maximum \u2018page\u2019 limit, as a SPARQL query could return an arbitrary number of results. When no results can be retrieved for the requested page an empty page will be returned. The argument \u2018pageSize\u2019 corresponds to how many results each page would contain. The \u2018pageSize\u2019 has a default of 100 returned results and a maximum \u2018pageSize\u2019 limit of 10.000 returned results. The request will return an error when the \u2018pageSize\u2019 is set higher than 10.000. The RESTful API for the saved SPARQL queries follows the RFC 8288 standard. The request will return a response body containing the result set and a response header. The response header contains a link header with the relative \"next\" request, the relative \"prev\" request, and the relative \"first\" request. By following the \"next\" link header request you can chain the pagination and retrieve all results. link: ; rel=\"next\", ; rel=\"prev\", ; rel=\"first\" Pagination with TriplyDB.js \u00b6 TriplyDB.js is the official programming library for interacting with TriplyDB . TriplyDB.js allows the user to connect to a TriplyDB instance via the TypeScript language. TriplyDB.js has the advantage that it can handle pagination internally so it can reliably retrieve a large number of results. To get the output for a construct or select query, follow these steps: 1. Import the triplyDB.js library and set your parameters, regarding the TriplyDB instance and the account in which you have saved the query as well as the name of the query. 
Do not forget that we perform TriplyDB.js requests within an async context . import Client from '@triply/triplydb' async function run() { // Your code goes here. const client = Client.get({token: process.env.TRIPLYDB_TOKEN}) const account = await client.getAccount('account-name') const query = await account.getQuery('name-of-some-query') } run() 2. Get the results of a query by setting a results variable. More specifically, for construct queries you use the statements() call: const query = await account.getQuery('name-of-some-query') const results = query.results().statements() For select queries you use the bindings() call: const query = await account.getQuery('name-of-some-query') const results = query.results().bindings() Additionally, saved queries can have 'API variables' that allow you to specify variables that are used in the query. Thus, if you have query parameters, pass their values as the first argument to results as follows: // For SPARQL construct queries. const results = query.results({ someVariable: 'value of someVariable', anotherVariable: 'value of anotherVariable' }).statements() // For SPARQL select queries. const results = query.results({ someVariable: 'value of someVariable', anotherVariable: 'value of anotherVariable' }).bindings() 3. To iterate the results of your SPARQL query you have three options: 3.1. Iterate through the results per row in a for -loop: // Iterating over the results. for await (const row of results) { // execute something } Note: For select queries the for -loop iterates over the rows of the result set. For construct queries the for -loop iterates over the statements in the result set. 3.2. Save the results to a file. This is only supported for SPARQL construct queries: // Saving the results of a SPARQL construct query to a file. await results.toFile('my-file.nt') 3.3. Load all results into memory in the form of an Array. Note that this is almost never used. If you want to process results, then use the 3a option; if you want to persist results, then option 3b suits better. // Loading results for a SPARQL construct or SPARQL select query into memory. const array = await results.toArray()","title":"SPARQL pagination"},{"location":"generics/sparql-pagination/#sparql-pagination","text":"This page explains how to retrieve all results from a SPARQL query using pagination. Often SPARQL queries can return more than 10.000 results, but due to limitations the result set will only consist out of the first 10.000 results. To retrieve more than 10.000 results you can use pagination. TriplyDB supports two methods to retrieve all results from a SPARQL query. Pagination with the saved query API or Pagination with TriplyDB.js.","title":"SPARQL Pagination"},{"location":"generics/sparql-pagination/#pagination-with-the-saved-query-api","text":"Each TriplyDB instance has a fully RESTful API. The TriplyDB RESTful API is extended for saved SPARQL queries. The API for saved queries is extended with two arguments that the query is able to process paginated result sets. The arguments are \u2018page\u2019 and \u2018pageSize\u2019. An example of a paginated saved SPARQL query request would look like: https://api.triplydb.com/queries/academy/pokemon-color/run?page=3&pageSize=100 The example request argument \u2018page\u2019 corresponds to the requested page. In the example request this would correspond to the third page of paginated SPARQL query, according to the \u2018pageSize\u2019. 
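As an illustration, the following minimal sketch retrieves every page of the example query above by incrementing the page argument until an empty page is returned; it assumes Node's built-in fetch and a select query that returns a JSON array per page.

// Minimal sketch: collect all pages of a paginated saved query.
async function fetchAllPages(): Promise<unknown[]> {
  const rows: unknown[] = []
  for (let page = 1; ; page++) {
    const response = await fetch(
      `https://api.triplydb.com/queries/academy/pokemon-color/run?page=${page}&pageSize=100`
    )
    const pageRows: unknown[] = await response.json()
    // An empty page signals that all results have been retrieved.
    if (pageRows.length === 0) break
    rows.push(...pageRows)
  }
  return rows
}
fetchAllPages().then(rows => console.log(`Retrieved ${rows.length} results`))

The loop keeps requesting pages for as long as results come back.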
There is no maximum \u2018page\u2019 limit, as a SPARQL query could return an arbitrary number of results. When no results can be retrieved for the requested page an empty page will be returned. The argument \u2018pageSize\u2019 corresponds to how many results each page would contain. The \u2018pageSize\u2019 has a default of 100 returned results and a maximum \u2018pageSize\u2019 limit of 10.000 returned results. The request will return an error when the \u2018pageSize\u2019 is set higher than 10.000. The RESTful API for the saved SPARQL queries follows the RFC 8288 standard. The request will return a response body containing the result set and a response header. The response header contains a link header with the relative \"next\" request, the relative \"prev\" request, and the relative \"first\" request. By following the \"next\" link header request you can chain the pagination and retrieve all results. link: ; rel=\"next\", ; rel=\"prev\", ; rel=\"first\"","title":"Pagination with the saved query API"},{"location":"generics/sparql-pagination/#pagination-with-triplydbjs","text":"TriplyDB.js is the official programming library for interacting with TriplyDB . TriplyDB.js allows the user to connect to a TriplyDB instance via the TypeScript language. TriplyDB.js has the advantage that it can handle pagination internally so it can reliably retrieve a large number of results. To get the output for a construct or select query, follow these steps: 1. Import the triplyDB.js library and set your parameters, regarding the TriplyDB instance and the account in which you have saved the query as well as the name of the query. Do not forget that we perform TriplyDB.js requests within an async context . import Client from '@triply/triplydb' async function run() { // Your code goes here. const client = Client.get({token: process.env.TRIPLYDB_TOKEN}) const account = await client.getAccount('account-name') const query = await account.getQuery('name-of-some-query') } run() 2. Get the results of a query by setting a results variable. More specifically, for construct queries you use the statements() call: const query = await account.getQuery('name-of-some-query') const results = query.results().statements() For select queries you use the bindings() call: const query = await account.getQuery('name-of-some-query') const results = query.results().bindings() Additionally, saved queries can have 'API variables' that allow you to specify variables that are used in the query. Thus, if you have query parameters, pass their values as the first argument to results as follows: // For SPARQL construct queries. const results = query.results({ someVariable: 'value of someVariable', anotherVariable: 'value of anotherVariable' }).statements() // For SPARQL select queries. const results = query.results({ someVariable: 'value of someVariable', anotherVariable: 'value of anotherVariable' }).bindings() 3. To iterate the results of your SPARQL query you have three options: 3.1. Iterate through the results per row in a for -loop: // Iterating over the results. for await (const row of results) { // execute something } Note: For select queries the for -loop iterates over the rows of the result set. For construct queries the for -loop iterates over the statements in the result set. 3.2. Save the results to a file. This is only supported for SPARQL construct queries: // Saving the results of a SPARQL construct query to a file. await results.toFile('my-file.nt') 3.3. Load all results into memory in the form of an Array. 
Note that this is almost never used. If you want to process results, then use the 3a option; if you want to persist results, then option 3b suits better. // Loading results for a SPARQL construct or SPARQL select query into memory. const array = await results.toArray()","title":"Pagination with TriplyDB.js"},{"location":"triply-api/","text":"On this page: Triply API Authentication Creating an API token Using the API token Important Security Considerations Exporting linked data Datasets Create a dataset Upload linked data Upload assets Accounts Queries Query metadata (GRLC) LD Browser API Triple Pattern Fragments (TPF) URI path Reply format Query parameters Example request Exporting data Query parameters Example requests Services Create a service Synchronize a service SPARQL Sending a SPARQL Query request SPARQL Query result formats Examples of SPARQL Query requests GET request URL-encoded POST request Direct POST request SPARQL JSON SPARQL XML SPARQL tab-separated values SPARQL comma-separated values JSON-LD N-Quads N-Triples TriG Turtle GraphQL URI path Requests and Response Example Elasticsearch URI path Reply format Examples Simple search Custom search Count API Setting up index templates for ElasticSearch Index templates Component templates Triply API \u00b6 Each Triply instance has a fully RESTful API. All functionality, from managing the Triply instance to working with your data, is done through the API. This document describes the general setup of the API, contact support@triply.cc for more information. Authentication \u00b6 When a dataset is published publicly, most of the read operation on that dataset can be performed without authentication. Write operations and read operations on datasets that are published internally or privately require authentication. Creating an API token \u00b6 Authentication is implemented through API tokens. An API token can be created within the TriplyDB UI in the following way: Log into your TriplyDB instance. Click on the user menu in the top-right corner and click on \u201cUser settings\u201d. Go to the \u201cAPI tokens\u201d tab. Click the \u201cCreate token\u201d button, enter a description for the token (e.g., \u201ctest-token\u201d) and select the appropriate access rights. Click on \u201cCreate\u201d and copy the created API token (a lengthy string of characters). This string is only shown once, upon creation, and must not be shared with others. (Other users can create their own token in the here described way.) Using the API token \u00b6 API tokens are used by specifying them in an HTTP request header as follows: Authorization: Bearer TOKEN In the above, TOKEN should be replaced by your personal API token (a lengthy sequence of characters). See Creating an API token for information on how to create an API token. Important Security Considerations \u00b6 Do Not Commit Your Token to a Git Repository : Under no circumstances should you commit your TriplyDB token to a Git repository. This practice is not allowed according to our ISO standards. Do Not Share Your Token: Avoid sharing your TriplyDB token with anyone who should not have access to your TriplyDB resources . Tokens should be treated as sensitive information and shared only with trusted parties. Change Tokens Regularly : To enhance security, consider regularly generating a new token to replace the existing one especially if you suspect any compromise. Exporting linked data \u00b6 Every TriplyDB API path that returns linked data provides a number of serializations to choose from. 
We support the following serializations: Serialization Media type File extension TriG application/trig .trig N-Triples application/n-triples .nt N-Quads application/n-quads .nq Turtle text/turtle .ttl JSON-LD application/ld+json .jsonld To request a serialization, use one of the following mechanisms: Add an Accept header to the request. E.g. Accept: application/n-triples Add the extension to the URL path. E.g. https://api.triplydb.com/datasets/Triply/iris/download.nt Datasets \u00b6 Triply API requests are always directed towards a specific URI path. URI paths will often have the following form: https://api.INSTANCE/datasets/ACCOUNT/DATASET/ Upper-case letter words must be replaced by the following values: INSTANCE :: The host name of the TriplyDB instance that you want to use. ACCOUNT :: The name of a specific user or a specific organization. DATASET :: The name of a specific dataset. Here is an example of a URI path that points to the Triply API for the Pok\u00e9mon dataset: https://api.triplydb.com/datasets/academy/pokemon/ Create a dataset \u00b6 You can create a new dataset via the Triply API. You need to use the API Token and send an HTTP POST request with data specifying: name , accessLevel and displayName . The example of the URI: curl -H 'Authorization: Bearer TOKEN' -H 'Content-Type: application/json' -X POST https://api.INSTANCE/datasets/ACCOUNT/ -d '{\"name\": \"NAME\", \"accessLevel\": \"ACCESS_LEVEL\", \"displayName\": \"DISPLAY_NAME\"}' Upper-case letter words in json after -d must be replaced by the following values: NAME :: The name of the dataset in the url. ACCESS_LEVEL :: public , private or internal . For more information visit Access levels in TriplyDB . DISPLAY_NAME :: The display name of the dataset. Upload linked data \u00b6 You can upload linked data via the Triply API. You need to use the API Token and send an HTTP POST request with data specifying the local file path. The list of supported file extensions can be checked in Adding data: File upload documentation. The example of such a request: curl -H 'Authorization: Bearer TOKEN' -X POST https://api.INSTANCE/datasets/ACCOUNT/DATASET/jobs -F file=@FILENAME Upper-case letter words must be replaced by the following values: TOKEN :: Your TriplyDB token. INSTANCE :: The domain of your instance ACCOUNT :: Your account name DATASET :: The dataset name FILENAME :: The path to the file you want to upload A request looks like this: curl -H 'Authorization: Bearer xxxxxx' -X POST https://api.triplydb.com/datasets/my-account-name/my-dataset-name/jobs -F file=@./myfile.trig Limitations : We only support this API route for uploads less than 5MB. To upload more data, use: TriplyDB-JS : See the importFrom* methods under the Dataset class . TriplyDB Command-line Interface Upload assets \u00b6 You can upload assets via the Triply API. You need to use the API Token and send an HTTP POST request with data specifying the local file path. To add a new asset: curl -H \"Authorization: Bearer TOKEN\" -X POST https://api.INSTANCE/datasets/ACCOUNT/DATASET/assets -F file=@FILENAME To add a version to an existing asset: curl -H \"Authorization: Bearer TOKEN\" -X POST https://api.INSTANCE/datasets/ACCOUNT/DATASET/assets/IDENTIFIER -F file=@FILENAME Upper-case letter words must be replaced by the following values: TOKEN :: Your TriplyDB token. INSTANCE :: The domain of your instance ACCOUNT :: Your account name DATASET :: The dataset name IDENTIFIER :: The asset identifier where you'd like to add a new version to. 
FILENAME :: The path to the file you want to upload This request returns a JSON object, that includes (among other things) an identifier key, which can be used as a persistent identifier for this asset. A request to add a new asset looks like this: curl -H 'Authorization: Bearer xxxxxx' -X POST https://api.triplydb.com/datasets/my-account-name/my-dataset-name/assets -F file=@./myfile.txt A request to add a version to an existing asset looks like this: curl -H 'Authorization: Bearer xxxxxx' -X POST https://api.triplydb.com/datasets/my-account-name/my-dataset-name/assets/yyyyy -F file=@./myfile.txt Limitations : We only support this API route for uploads less than 5MB. To upload more data, use: TriplyDB-JS : See the uploadAsset methods under the Dataset class . TriplyDB Command-line Interface Accounts \u00b6 Information about TriplyDB accounts (organizations and users) can be retrieved from the following API path: https://api.INSTANCE/accounts Upper-case letter words must be replaced by the following values: INSTANCE :: The host name of the TriplyDB instance that you want to use. Here is an example of a URI path that points to the Triply API for the Triply organization account: https://api.triplydb.com/accounts/Triply Queries \u00b6 TriplyDB allows users to save SPARQL queries. The metadata for all saved query can be accessed as follows: https://api.triplydb.com/queries By adding an account name (for example: 'Triply'), metadata for all saved queries for that account can be accessed as follows: https://api.triplydb.com/queries/Triply By adding an account name and a query name (for example: 'Triply/flower-length'), metadata for one specific saved query can be accessed as follows: https://api.triplydb.com/queries/Triply/flower-length Query metadata (GRLC) \u00b6 You can retrieve a text-based version of each query, by requesting the text/plain content type: curl -vL -H 'Accept: text/plain' 'https://api.triplydb.com/queries/JD/pokemonNetwork' This returns the query string, together with metadata annotations. These metadata annotations use the GRLC format . For example: #+ description: This query shows a small subgraph from the Pokemon dataset. #+ endpoint: https://api.triplydb.com/datasets/academy/pokemon/services/pokemon/sparql #+ endpoint_in_url: false construct where { ?s ?p ?o. } limit 100 Notice that the GRLC annotations are encoded in SPARQL comments, i.e. lines that start with the hash character ( # ). This makes the result immediately usable as a SPARQL query. The above example includes the following GRLC annotations: description gives a human-readable description of the meaning of the query. This typically includes an explanation of the purpose or goal for which this query is used, the content returned, or the process or task in which this query is used. endpoint The URL of the SPARQL endpoint where queries are sent to. endpoint_in_url configures whether the URL of the SPARQL endpoint should be specified through the API. In TriplyDB, this configuration is by default set to false . (Users of the RESTful API typically expect domain parameters such as countryName or maximumAge , but they do not necessarily expect technical parameters like an endpoint URL.) LD Browser API \u00b6 Triply APIs provide a convenient way to access data used by LD Browser , which offers a comprehensive overview of a specific IRI. By using Triply API for a specific IRI, you can retrieve the associated 'document' in the .nt format that describes the IRI. 
To make an API request for a specific instance, you can use the following URI path: https://api.triplydb.com/datasets/ACCOUNT/DATASET/describe.nt?resource=RESOURCE To illustrate this, let's take the example of the DBpedia dataset and the specific instance of 'Mona Lisa' . If you use this URI path: https://api.triplydb.com/datasets/DBpedia-association/dbpedia/describe.nt?resource=http%3A%2F%2Fdbpedia.org%2Fresource%2FMona_Lisa In your browser, the .nt document describing the 'Mona Lisa' instance will be automatically downloaded. You can then upload this file to a dataset and visualize it in a graph . Figure 1 illustrates the retrieved graph for the \u2018Mona Lisa\u2019 instance. The requested resource will be displayed in the center of the graph, forming an 'ego graph'. It will include all direct properties, as well as some indirect properties that are also pulled in by LD Browser. The labels for all classes and properties will be included for easy human-readable display. In addition, this API also supports traversing blank node-replacing well-known IRIs (CBD style), and limits the number of objects per subject/property to manage the description size. This corresponds to the \"Show more\" button in the LD Browser GUI, ensuring a manageable and user-friendly experience. Triple Pattern Fragments (TPF) \u00b6 Triple Pattern Fragments (TPF) is a community standard that allows individual linked datasets to be queried for Triple Patterns (TP), a subset of the more complex SPARQL query language. The Triply API implements Triple Pattern Fragments version 2019-01-18 and Linked Data Fragments version 2016-06-05. The Triple Pattern Fragments (TPF) API is available for all datasets in Triply and does not require running a dedicated service. URI path \u00b6 TPF requests are sent to the following URI path: https://api.INSTANCE/datasets/ACCOUNT/DATATSET/fragments Reply format \u00b6 Since TPF replies distinguish between data and metadata that are stored in different graphs, it is recommended to request the TriG content type with the following HTTP request header: Accept: application/trig Query parameters \u00b6 Triple Pattern Fragments (TPF) uses the following query parameters in order to retrieve only those triples that adhere to a specified Triple Pattern: Key Value Purpose subject A URL-encoded IRI. Only return triples where the given IRI appears in the subject position. predicate A URL-encoded IRI. Only return triples where the given IRI appears in the predicate position. object A URL-encoded IRI or literal. Only return triples where the given IRI or literal appears in the object position. Example request \u00b6 curl -G \\ 'https://api.triplydb.com/datasets/academy/pokemon/fragments' \\ --data-urlencode 'predicate=http://www.w3.org/2000/01/rdf-schema#label' \\ -H 'Accept: application/trig' Exporting data \u00b6 To export the linked data, use the following path: https://api.INSTANCE/datasets/ACCOUNT/DATATSET/download Query parameters \u00b6 By default, an export includes all linked data graphs. Use a query argument to specify a particular graph. Key Value Purpose graph A URL-encoded IRI. Only download the export of the given graph IRI. 
Therefore, to export the linked data of a graph , use the following path: https://api.INSTANCE/datasets/ACCOUNT/DATATSET/download/?graph=GRAPH To find out which graphs are available, use the following path: https://api.INSTANCE/datasets/ACCOUNT/DATATSET/graphs Example requests \u00b6 Export a dataset: curl 'https://api.triplydb.com/datasets/academy/pokemon/download' \\ -H 'Accept: application/trig' > exportDataset.trig.gz Export a graph: First, find out which graphs are available: curl 'https://api.triplydb.com/datasets/academy/pokemon/graphs' Then, download one of the graph: curl 'curl 'https://api.triplydb.com/datasets/academy/pokemon/download?graph=https://triplydb.com/academy/pokemon/graphs/data' -H 'Accept: application/trig' > exportGraph.trig.gz Services \u00b6 Some API requests require the availability of a specific service over the dataset. These requests are directed towards a URI path of the following form: https://api.INSTANCE/datasets/ACCOUNT/DATASET/services/SERVICE/ Upper-case letter words must be replaced by the following values: SERVICE :: The name of a specific service that has been started for the corresponding dataset. See the previous section for Datasets to learn the meaning of INSTANCE , ACCOUNT , and DATASET . Here is an example of a URI path that points to a SPARQL endpoint over the Pok\u00e9mon dataset: https://api.triplydb.com/datasets/academy/pokemon/services/pokemon/ See the following sections for more information on how to query the endpoints provided by services: SPARQL Elasticsearch Create a service \u00b6 You can create a service for a dataset via TriplyDB API. You need to use the API Token and send an HTTP POST request with data specifying: \"type\" and \"name\" . The example of the URI: curl -H 'Authorization: Bearer TOKEN' -H 'Content-Type: application/json' -X POST https://api.INSTANCE/datasets/ACCOUNT/DATASET/services/ -d '{\"type\": \"TYPE\", \"name\": \"NAME\"}' Upper-case letter words in json after -d must be replaced by the following values: TYPE :: SPARQL ( virtuoso or jena ) or Elasticsearch NAME :: The name of the service Synchronize a service \u00b6 You can synchronize existing service for a dataset via TriplyDB API. You need to use the API Token and send an HTTP POST request with data: {\"sync\": \"true\"} The example of the URI: curl -H 'Authorization: Bearer TOKEN' -H 'Content-Type: application/json' -X POST https://api.INSTANCE/datasets/ACCOUNT/DATASET/services/SERVICE -d '{\"sync\": \"true\"}' SPARQL \u00b6 There are two service types in TriplyDB that expose the SPARQL 1.1 Query Language: \"Sparql\" and \"Jena\". The former works well for large quantities of instance data with a relatively small data model; the latter works well for smaller quantities of data with a richer data model. SPARQL services expose a generic endpoint URI at the following location (where ACCOUNT , DATASET and SERVICE are user-chosen names): https://api.triplydb.com/datasets/ACCOUNT/DATASET/services/SERVICE/sparql Everybody who has access to the dataset also has access to its services, including its SPARQL services: - For Public datasets, everybody on the Internet or Intranet can issue queries. - For Internal datasets, only users that are logged into the triple store can issue queries. - For Private datasets, only users that are logged into the triple store and are members of ACCOUNT can issue queries. Notice that for professional use it is easier and better to use saved queries . 
Saved queries have persistent URIs, descriptive metadata, versioning, and support for reliable large-scale pagination ( see how to use pagination with saved query API ). Still, if you do not have a saved query at your disposal and want to perform a custom SPARQL request against an accessible endpoint, you can do so. TriplyDB implements the SPARQL 1.1 Query Protocol standard for this purpose. Sending a SPARQL Query request \u00b6 According to the SPARQL 1.1 Protocol, queries can be send in the 3 different ways that are displayed in Table 1 . For small query strings it is possible to send an HTTP GET request (row 1 in Table 1 ). A benefit of this approach is that all information is stored in one URI. For public data, copy/pasting this URI in a web browser runs the query. For larger query strings it is required to send an HTTP POST request (rows 2 and 3 in Table 1 ). The reason for this is that longer query strings result in longer URIs when following the HTTP GET approach. Some applications do not support longer URIs, or they even silently truncate them resulting in an error down the line. The direct POST approach (row 3 in Table 1 ) is the best of these 3 variants, since it most clearly communicates that it is sending a SPARQL query request (see the Content-Type column). HTTP Method Query String Parameters Request Content-Type Request Message Body query via GET GET query (exactly 1) default-graph-uri (0 or more) named-graph-uri (0 or more) none none query via URL-encoded POST POST none application/x-www-form-urlencoded URL-encoded, ampersand-separated query parameters. query (exactly 1) default-graph-uri (0 or more) named-graph-uri (0 or more) query via POST directly POST default-graph-uri (0 or more) named-graph-uri (0 or more) application/sparql-query Unencoded SPARQL query string Table 1 - Overview of the three different ways in which SPARQL queries can be issues over HTTP. SPARQL Query result formats \u00b6 SPARQL services are able to return results in different formats. The user can specify the preferred format by specifying the corresponding Media Type in the HTTP Accept header. TriplyDB supports the Media Types in the following table. Notice that the chosen result format must be supported for your query form. Alternatively, it is possible (but not preferred) to specify the requested format as an URI path suffix; see the GET request section for an example. Result format Media Type Query forms Suffix CSV text/csv Select .csv JSON application/json Ask, Select .json JSON-LD application/ld+json Construct, Describe .jsonld N-Quads application/n-quads Construct, Describe .nq N-Triples application/n-triples Construct, Describe .nt RDF/XML application/rdf+xml Construct, Describe SPARQL JSON application/sparql-results+json Ask, Select .srj SPARQL XML application/sparql-results+xml Ask, Select .srx TriG application/trig Construct, Describe .trig TSV text/tab-separated-values Select .tsv Turtle text/turtle Construct, Describe .ttl Examples of SPARQL Query requests \u00b6 This section contains examples of SPARQL HTTP requests. The requests run either of the following two SPARQL queries against a public SPARQL endpoint that contains data about Pokemon: select * { ?s ?p ?o. } limit 1 construct where { ?s ?p ?o. } limit 1 The examples made use of the popular command-line tool cURL . These examples should also work in any other HTTP client tool or library. 
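As an illustration of the latter, the following minimal sketch issues the same select query from TypeScript with the built-in fetch function, using the direct POST variant:

// Direct POST of a SPARQL query string (Content-Type: application/sparql-query).
const endpoint = 'https://api.triplydb.com/datasets/academy/pokemon/services/pokemon/sparql'

async function run() {
  const response = await fetch(endpoint, {
    method: 'POST',
    headers: {
      'Content-Type': 'application/sparql-query',
      // Request the standard SPARQL JSON result format.
      'Accept': 'application/sparql-results+json',
    },
    body: 'select * { ?s ?p ?o } limit 1',
  })
  console.log(await response.json())
}
run()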
GET request \u00b6 curl https://api.triplydb.com/datasets/academy/pokemon/services/pokemon/sparql?query=select%20%2A%20%7B%20%3Fs%20%3Fp%20%3Fo.%20%7D%20limit%201 Result: [ { \"s\": \"https://triplydb.com/academy/pokemon/vocab/\", \"p\": \"http://www.w3.org/1999/02/22-rdf-syntax-ns#type\", \"o\": \"http://www.w3.org/2002/07/owl#Ontology\" } ] The following request is identical to the previous one, but adds the \".srj\" suffix to the URI path (see /sparql.srj ). All suffixes from the table in Section SPARQL Query result formats are supported. curl https://api.triplydb.com/datasets/academy/pokemon/services/pokemon/sparql.srj?query=select%20%2A%20%7B%20%3Fs%20%3Fp%20%3Fo.%20%7D%20limit%201 This returns the official SPARQL Result Set JSON (SRJ) format. Notice that this official format is more verbose than the standard JSON format: { \"head\": { \"link\": [], \"vars\": [ \"s\", \"p\", \"o\" ] }, \"results\": { \"bindings\": [ { \"s\": { \"type\": \"uri\", \"value\": \"https://triplydb.com/academy/pokemon/\" }, \"p\": { \"type\": \"uri\", \"value\": \"http://www.w3.org/1999/02/22-rdf-syntax-ns#type\" }, \"o\": { \"type\": \"uri\", \"value\": \"http://rdfs.org/ns/void#Dataset\" } } ] } } URL-encoded POST request \u00b6 curl -X POST https://api.triplydb.com/datasets/academy/pokemon/services/pokemon/sparql \\ -H 'Content-Type: application/x-www-form-urlencoded' \\ --data query=select%20%2A%20%7B%20%3Fs%20%3Fp%20%3Fo.%20%7D%20limit%201 Result: [ { \"s\": \"https://triplydb.com/academy/pokemon/vocab/\", \"p\": \"http://www.w3.org/1999/02/22-rdf-syntax-ns#type\", \"o\": \"http://www.w3.org/2002/07/owl#Ontology\" } ] Direct POST request \u00b6 curl -X POST https://api.triplydb.com/datasets/academy/pokemon/services/pokemon/sparql \\ -H 'Content-Type: application/sparql-query' \\ -d 'select * { ?s ?p ?o } limit 1' Result: [ { \"s\": \"https://triplydb.com/academy/pokemon/vocab/\", \"p\": \"http://www.w3.org/1999/02/22-rdf-syntax-ns#type\", \"o\": \"http://www.w3.org/2002/07/owl#Ontology\" } ] SPARQL JSON \u00b6 Like the previous example, but with an Accept header that specifies Media Type application/sparql-results+json : curl -X POST https://api.triplydb.com/datasets/academy/pokemon/services/pokemon/sparql \\ -H 'Accept: application/sparql-results+json' \\ -H 'Content-Type: application/sparql-query' \\ -d 'select * { ?s ?p ?o } limit 1' Result: { \"head\": { \"vars\": [\"s\", \"p\", \"o\"] }, \"results\": { \"bindings\": [ { \"s\": { \"type\": \"uri\", \"value\": \"https://triplydb.com/academy/pokemon/vocab/\" }, \"p\": { \"type\": \"uri\", \"value\": \"http://www.w3.org/1999/02/22-rdf-syntax-ns#type\" }, \"o\": { \"type\": \"uri\", \"value\": \"http://www.w3.org/2002/07/owl#Ontology\" } } ] } } SPARQL XML \u00b6 Like the previous example, but with Media Type application/sparql-results+xml in the Accept header: curl -X POST https://api.triplydb.com/datasets/academy/pokemon/services/pokemon/sparql \\ -H 'Accept: application/sparql-results+xml' \\ -H 'Content-Type: application/sparql-query' \\ -d 'select * { ?s ?p ?o } limit 1' Result: https://triplydb.com/academy/pokemon/vocab/ http://www.w3.org/1999/02/22-rdf-syntax-ns#type http://www.w3.org/2002/07/owl#Ontology SPARQL tab-separated values \u00b6 Like the previous examples, but with Media Type text/tab-separated-values in the Accept header: curl -X POST https://api.triplydb.com/datasets/academy/pokemon/services/pokemon/sparql \\ -H 'Accept: text/tab-separated-values' \\ -H 'Content-Type: application/sparql-query' \\ -d 'select * { ?s ?p ?o } limit 1' 
\"s\" \"p\" \"o\" \"https://triplydb.com/academy/pokemon/vocab/\" \"http://www.w3.org/1999/02/22-rdf-syntax-ns#type\" \"http://www.w3.org/2002/07/owl#Ontology\" SPARQL comma-separated values \u00b6 Like the previous examples, but with Media Type text/csv in the Accept header: curl -X POST https://api.triplydb.com/datasets/academy/pokemon/services/pokemon/sparql \\ -H 'Accept: text/csv' \\ -H 'Content-Type: application/sparql-query' \\ -d 'select * { ?s ?p ?o } limit 1' Result: \"s\",\"p\",\"o\" \"https://triplydb.com/academy/pokemon/vocab/\",\"http://www.w3.org/1999/02/22-rdf-syntax-ns#type\",\"http://www.w3.org/2002/07/owl#Ontology\" JSON-LD \u00b6 Like the previous examples, but with a SPARQL construct query and Media Type application/ld+json in the Accept header: curl -X POST https://api.triplydb.com/datasets/academy/pokemon/services/pokemon/sparql \\ -H 'Accept: application/ld+json' \\ -H 'Content-Type: application/sparql-query' \\ -d 'construct where { ?s ?p ?o } limit 1' Result: { \"@graph\": [ { \"@id\": \"https://triplydb.com/academy/pokemon/vocab/\", \"@type\": \"http://www.w3.org/2002/07/owl#Ontology\" } ] } N-Quads \u00b6 Like the previous examples, but with Media Type application/n-quads in the Accept header: curl -X POST https://api.triplydb.com/datasets/academy/pokemon/services/pokemon/sparql \\ -H 'Accept: application/n-quads' \\ -H 'Content-Type: application/sparql-query' \\ -d 'construct where { ?s ?p ?o } limit 1' Result: { \"@graph\": [ { \"@id\": \"https://triplydb.com/academy/pokemon/vocab/\", \"@type\": \"http://www.w3.org/2002/07/owl#Ontology\" } ] } N-Triples \u00b6 Like the previous examples, but with Media Type application/n-triples in the Accept header: curl -X POST https://api.triplydb.com/datasets/academy/pokemon/services/pokemon/sparql \\ -H 'Accept: application/n-triples' \\ -H 'Content-Type: application/sparql-query' \\ -d 'construct where { ?s ?p ?o } limit 1' Result: . TriG \u00b6 Like the previous examples, but with Media Type application/trig in the Accept header: curl -X POST https://api.triplydb.com/datasets/academy/pokemon/services/pokemon/sparql \\ -H 'Accept: application/trig' \\ -H 'Content-Type: application/sparql-query' \\ -d 'construct where { ?s ?p ?o } limit 1' Result: @prefix rdf: . @prefix owl: . rdf:type owl:Ontology . Turtle \u00b6 Like the previous examples, but with Media Type text/turtle in the Accept header: curl -X POST https://api.triplydb.com/datasets/academy/pokemon/services/pokemon/sparql \\ -H 'Accept: text/turtle' \\ -H 'Content-Type: application/sparql-query' \\ -d 'construct where { ?s ?p ?o } limit 1' Result: @prefix rdf: . @prefix owl: . rdf:type owl:Ontology . GraphQL \u00b6 Some TriplyDB instances publish a GraphQL endpoint for every dataset. This endpoint can be used for GraphQL queries. It uses information from user-provided SHACL shapes to generate the GraphQL schema. See more information about this subject here . 
URI path \u00b6 GraphQL requests are sent to the following URI path: https://api.INSTANCE/datasets/ACCOUNT/DATASET/graphql Requests and Response \u00b6 The format of requests and corresponding responses is described by graphql.org . Example \u00b6 Perform a query with the following request body: { \"query\": \"{ CapitalConnection { edges { node { label } } } }\" } This request is issued in the following way with the cURL command-line tool: curl -X POST https://api.triplydb.com/datasets/iish/cshapes/graphql \\ -d '{ \"query\":\"{ CapitalConnection { edges { node { label } } } }\"}' \\ -H \"Content-Type: application/json\" Elasticsearch \u00b6 The text search API returns a list of linked data entities based on a supplied text string. The text string is matched against the text in literals and IRIs that appear in the linked data description of the returned entities. The text search API is only available for a dataset after an Elasticsearch service has been created for that dataset. Two types of searches can be performed: a simple search, and a custom search. Simple searches require one search term for a fuzzy match. Custom searches accept a JSON object conforming to the Elasticsearch query DSL . URI path \u00b6 Text search requests are sent to the following URI path: https://api.INSTANCE/datasets/ACCOUNT/DATASET/services/SERVICE/search Reply format \u00b6 The reply format is a JSON object. Search results are returned in the JSON array that is stored under key sequence \"hits\"/\"hits\" . The order in which search results appear in the array is meaningful: better matches appear earlier. Every search result is represented by a JSON object. The name of the linked data entity is specified under key sequence \"_id\" . Properties of the linked data entity are stored as IRI keys. The values of these properties appear in a JSON array in order to allow more than one object term per predicate term (as is often the case in linked data). The following code snippet shows part of the reply for the example request below. The reply includes two results for search string \u201cmew\u201d, returning the Pok\u00e9mon Mew (higher ranked result) and Mewtwo (lower ranked result).
{ \"hits\": { \"hits\": [ { \"_id\": \"https://triply.cc/academy/pokemon/id/pokemon/mew\", \"http://open vocab org/terms/canonicalUri\": [ \"http://pokedex.dataincubator.org/pokemon/151\" ], \"https://triply cc/academy/pokemon/def/baseAttack\": [ 100 ], \"https://triply cc/academy/pokemon/def/name\": [ \"MEW\", \"MEW\", \"MEW\", \"MEW\", \"MEW\", \"\u30df\u30e5\u30a6\" ], \u2026 }, { \"_id\": \"https://triply.cc/academy/pokemon/id/pokemon/mewtwo\", \"http://open vocab org/terms/canonicalUri\": [ \"http://pokedex.dataincubator.org/pokemon/150\" ], \"https://triply cc/academy/pokemon/def/baseAttack\": [ 110 ], \"https://triply cc/academy/pokemon/def/name\": [ \"MEWTU\", \"MEWTWO\", \"MEWTWO\", \"MEWTWO\", \"MEWTWO\", \"\u30df\u30e5\u30a6\u30c4\u30fc\" ], \u2026 } ] }, \u2026 } Examples \u00b6 Simple search \u00b6 Perform a search for the string mew : curl 'https://api.triplydb.com/datasets/academy/pokemon/services/search/search?query=mew' Custom search \u00b6 Perform a search using the custom query: { \"query\": { \"simple_query_string\": { \"query\": \"pikachu\" } } } This request is issued in the following way with the cURL command-line tool: curl -X POST 'https://api.triplydb.com/datasets/academy/pokemon/services/search/search' \\ -d '{\"query\":{\"simple_query_string\":{\"query\":\"pikachu\"}}}' \\ -H 'content-type: application/json' Count API \u00b6 Elasticsearch allows the number of results to be determined without having to actually retrieve all these results. This is done with the \"Count API\". This API comes in handy when the number of results is shown in applications such as faceted search interfaces. The following two requests return the number of results for the search strings \"Iris\" and \"Setosa\". Notice that \"Iris\" occurs more often (184 times) than \"Setosa\" (52 times): curl 'https://api.triplydb.com/datasets/Triply/iris/services/iris-es/_count' -H 'Content-Type: application/json' --data-raw $'{\"query\": { \"simple_query_string\": { \"query\": \"Iris\" } } }' {\"count\":184,\"_shards\":{\"total\":1,\"successful\":1,\"skipped\":0,\"failed\":0}} and: curl 'https://api.triplydb.com/datasets/Triply/iris/services/iris-es/_count' -H 'Content-Type: application/json' --data-raw $'{\"query\": { \"simple_query_string\": { \"query\": \"Setosa\" } } }' {\"count\":52,\"_shards\":{\"total\":1,\"successful\":1,\"skipped\":0,\"failed\":0}} Setting up index templates for ElasticSearch \u00b6 TriplyDB allows you to configure a custom mapping for Elasticsearch services in TriplyDB using index templates. Index templates \u00b6 Index templates make it possible to create indices with user defined configuration, which an index can then pull from. A template will be defined with a name pattern and some configuration in it. If the name of the index matches the template\u2019s naming pattern, the new index will be created with the configuration defined in the template. Official documentation from ElasticSearch on how to use Index templates can be found here . Index templates on TriplyDB can be configured through either TriplyDB API or TriplyDB-JS . Index template can be created by making a POST request to the following URL: https://api.INSTANCE/datasets/ACCOUNT/DATASET/services/ with this body: { \"type\": \"elasticSearch\", \"name\": \"SERVICE_NAME\", \"config\": { \"indexTemplates\": [ { \"index_patterns\": \"index\", \"name\": \"TEMPLATE_NAME\", ... } ] } } index_patterns and name are obligatory fields to include in the body of index template. 
Note that every index template must have its \"index_patterns\" field set to \"index\" ! Below is an example of the POST request: curl -H \"Authorization: Bearer TRIPLYDB_TOKEN\" -H \"Content-Type: application/json\" -d '{\"type\":\"elasticSearch\",\"name\":\"SERVICE_NAME\",\"config\":{\"indexTemplates\":[{\"index_patterns\":\"index\", \"name\": \"TEMPLATE_NAME\"}]}}' -X POST \"https://api.INSTANCE/datasets/ACCOUNT/DATASET/services/\" Component templates \u00b6 Component templates are building blocks for constructing index templates that specify index mappings, settings, and aliases. You can find the official documentation on their use in ElasticSearch here . They can be configured through either TriplyDB API or TriplyDB-JS . A component template can be created by making a POST request to the following URL: https://api.INSTANCE/datasets/ACCOUNT/DATASET/services/ with this body: { \"type\": \"elasticSearch\", \"name\": \"SERVICE_NAME\", \"config\": { \"componentTemplates\": [ { \"name\": \"TEMPLATE_NAME\", \"template\": { \"mappings\": { \"properties\": { ... } } } ... } ] } } name and template are obligatory fields in the body of a component template. A component template can only be created together with an index template. In that case, the index template must contain the field composed_of with the name of the component template. Below is an example of a POST request that creates a component template mapping the property https://schema.org/dateCreated to the type date . curl -H \"Authorization: Bearer TRIPLYDB_TOKEN\" -H \"Content-Type: application/json\" -d '{\"type\":\"elasticSearch\",\"name\":\"SERVICE_NAME\",\"config\":{\"indexTemplates\":[{\"index_patterns\":\"index\", \"name\": \"INDEX_TEMPLATE_NAME\",\"composed_of\":[\"COMPONENT_TEMPLATE_NAME\"]}], \"componentTemplates\":[{\"name\":\"COMPONENT_TEMPLATE_NAME\",\"template\":{\"mappings\":{\"properties\":{\"https://schema.org/dateCreated\":{\"type\":\"date\"}}}}}]}}' -X POST \"https://api.INSTANCE/datasets/ACCOUNT/DATASET/services/\"","title":"API"},{"location":"triply-api/#triply-api","text":"Each Triply instance has a fully RESTful API. All functionality, from managing the Triply instance to working with your data, is done through the API. This document describes the general setup of the API; contact support@triply.cc for more information.","title":"Triply API"},{"location":"triply-api/#authentication","text":"When a dataset is published publicly, most read operations on that dataset can be performed without authentication. Write operations, and read operations on datasets that are published internally or privately, require authentication.","title":"Authentication"},{"location":"triply-api/#creating-an-api-token","text":"Authentication is implemented through API tokens. An API token can be created within the TriplyDB UI in the following way: Log into your TriplyDB instance. Click on the user menu in the top-right corner and click on \u201cUser settings\u201d. Go to the \u201cAPI tokens\u201d tab. Click the \u201cCreate token\u201d button, enter a description for the token (e.g., \u201ctest-token\u201d) and select the appropriate access rights. Click on \u201cCreate\u201d and copy the created API token (a lengthy string of characters). This string is only shown once, upon creation, and must not be shared with others. 
(Other users can create their own token in the here described way.)","title":"Creating an API token"},{"location":"triply-api/#using-the-api-token","text":"API tokens are used by specifying them in an HTTP request header as follows: Authorization: Bearer TOKEN In the above, TOKEN should be replaced by your personal API token (a lengthy sequence of characters). See Creating an API token for information on how to create an API token.","title":"Using the API token"},{"location":"triply-api/#important-security-considerations","text":"Do Not Commit Your Token to a Git Repository : Under no circumstances should you commit your TriplyDB token to a Git repository. This practice is not allowed according to our ISO standards. Do Not Share Your Token: Avoid sharing your TriplyDB token with anyone who should not have access to your TriplyDB resources . Tokens should be treated as sensitive information and shared only with trusted parties. Change Tokens Regularly : To enhance security, consider regularly generating a new token to replace the existing one especially if you suspect any compromise.","title":"Important Security Considerations"},{"location":"triply-api/#exporting-linked-data","text":"Every TriplyDB API path that returns linked data provides a number of serializations to choose from. We support the following serializations: Serialization Media type File extension TriG application/trig .trig N-Triples application/n-triples .nt N-Quads application/n-quads .nq Turtle text/turtle .ttl JSON-LD application/ld+json .jsonld To request a serialization, use one of the following mechanisms: Add an Accept header to the request. E.g. Accept: application/n-triples Add the extension to the URL path. E.g. https://api.triplydb.com/datasets/Triply/iris/download.nt","title":"Exporting linked data"},{"location":"triply-api/#datasets","text":"Triply API requests are always directed towards a specific URI path. URI paths will often have the following form: https://api.INSTANCE/datasets/ACCOUNT/DATASET/ Upper-case letter words must be replaced by the following values: INSTANCE :: The host name of the TriplyDB instance that you want to use. ACCOUNT :: The name of a specific user or a specific organization. DATASET :: The name of a specific dataset. Here is an example of a URI path that points to the Triply API for the Pok\u00e9mon dataset: https://api.triplydb.com/datasets/academy/pokemon/","title":"Datasets"},{"location":"triply-api/#create-a-dataset","text":"You can create a new dataset via the Triply API. You need to use the API Token and send an HTTP POST request with data specifying: name , accessLevel and displayName . The example of the URI: curl -H 'Authorization: Bearer TOKEN' -H 'Content-Type: application/json' -X POST https://api.INSTANCE/datasets/ACCOUNT/ -d '{\"name\": \"NAME\", \"accessLevel\": \"ACCESS_LEVEL\", \"displayName\": \"DISPLAY_NAME\"}' Upper-case letter words in json after -d must be replaced by the following values: NAME :: The name of the dataset in the url. ACCESS_LEVEL :: public , private or internal . For more information visit Access levels in TriplyDB . DISPLAY_NAME :: The display name of the dataset.","title":"Create a dataset"},{"location":"triply-api/#upload-linked-data","text":"You can upload linked data via the Triply API. You need to use the API Token and send an HTTP POST request with data specifying the local file path. The list of supported file extensions can be checked in Adding data: File upload documentation. 
The example of such a request: curl -H 'Authorization: Bearer TOKEN' -X POST https://api.INSTANCE/datasets/ACCOUNT/DATASET/jobs -F file=@FILENAME Upper-case letter words must be replaced by the following values: TOKEN :: Your TriplyDB token. INSTANCE :: The domain of your instance ACCOUNT :: Your account name DATASET :: The dataset name FILENAME :: The path to the file you want to upload A request looks like this: curl -H 'Authorization: Bearer xxxxxx' -X POST https://api.triplydb.com/datasets/my-account-name/my-dataset-name/jobs -F file=@./myfile.trig Limitations : We only support this API route for uploads less than 5MB. To upload more data, use: TriplyDB-JS : See the importFrom* methods under the Dataset class . TriplyDB Command-line Interface","title":"Upload linked data"},{"location":"triply-api/#upload-assets","text":"You can upload assets via the Triply API. You need to use the API Token and send an HTTP POST request with data specifying the local file path. To add a new asset: curl -H \"Authorization: Bearer TOKEN\" -X POST https://api.INSTANCE/datasets/ACCOUNT/DATASET/assets -F file=@FILENAME To add a version to an existing asset: curl -H \"Authorization: Bearer TOKEN\" -X POST https://api.INSTANCE/datasets/ACCOUNT/DATASET/assets/IDENTIFIER -F file=@FILENAME Upper-case letter words must be replaced by the following values: TOKEN :: Your TriplyDB token. INSTANCE :: The domain of your instance ACCOUNT :: Your account name DATASET :: The dataset name IDENTIFIER :: The asset identifier where you'd like to add a new version to. FILENAME :: The path to the file you want to upload This request returns a JSON object, that includes (among other things) an identifier key, which can be used as a persistent identifier for this asset. A request to add a new asset looks like this: curl -H 'Authorization: Bearer xxxxxx' -X POST https://api.triplydb.com/datasets/my-account-name/my-dataset-name/assets -F file=@./myfile.txt A request to add a version to an existing asset looks like this: curl -H 'Authorization: Bearer xxxxxx' -X POST https://api.triplydb.com/datasets/my-account-name/my-dataset-name/assets/yyyyy -F file=@./myfile.txt Limitations : We only support this API route for uploads less than 5MB. To upload more data, use: TriplyDB-JS : See the uploadAsset methods under the Dataset class . TriplyDB Command-line Interface","title":"Upload assets"},{"location":"triply-api/#accounts","text":"Information about TriplyDB accounts (organizations and users) can be retrieved from the following API path: https://api.INSTANCE/accounts Upper-case letter words must be replaced by the following values: INSTANCE :: The host name of the TriplyDB instance that you want to use. Here is an example of a URI path that points to the Triply API for the Triply organization account: https://api.triplydb.com/accounts/Triply","title":"Accounts"},{"location":"triply-api/#queries","text":"TriplyDB allows users to save SPARQL queries. 
The metadata for all saved query can be accessed as follows: https://api.triplydb.com/queries By adding an account name (for example: 'Triply'), metadata for all saved queries for that account can be accessed as follows: https://api.triplydb.com/queries/Triply By adding an account name and a query name (for example: 'Triply/flower-length'), metadata for one specific saved query can be accessed as follows: https://api.triplydb.com/queries/Triply/flower-length","title":"Queries"},{"location":"triply-api/#query-metadata-grlc","text":"You can retrieve a text-based version of each query, by requesting the text/plain content type: curl -vL -H 'Accept: text/plain' 'https://api.triplydb.com/queries/JD/pokemonNetwork' This returns the query string, together with metadata annotations. These metadata annotations use the GRLC format . For example: #+ description: This query shows a small subgraph from the Pokemon dataset. #+ endpoint: https://api.triplydb.com/datasets/academy/pokemon/services/pokemon/sparql #+ endpoint_in_url: false construct where { ?s ?p ?o. } limit 100 Notice that the GRLC annotations are encoded in SPARQL comments, i.e. lines that start with the hash character ( # ). This makes the result immediately usable as a SPARQL query. The above example includes the following GRLC annotations: description gives a human-readable description of the meaning of the query. This typically includes an explanation of the purpose or goal for which this query is used, the content returned, or the process or task in which this query is used. endpoint The URL of the SPARQL endpoint where queries are sent to. endpoint_in_url configures whether the URL of the SPARQL endpoint should be specified through the API. In TriplyDB, this configuration is by default set to false . (Users of the RESTful API typically expect domain parameters such as countryName or maximumAge , but they do not necessarily expect technical parameters like an endpoint URL.)","title":"Query metadata (GRLC)"},{"location":"triply-api/#ld-browser-api","text":"Triply APIs provide a convenient way to access data used by LD Browser , which offers a comprehensive overview of a specific IRI. By using Triply API for a specific IRI, you can retrieve the associated 'document' in the .nt format that describes the IRI. To make an API request for a specific instance, you can use the following URI path: https://api.triplydb.com/datasets/ACCOUNT/DATASET/describe.nt?resource=RESOURCE To illustrate this, let's take the example of the DBpedia dataset and the specific instance of 'Mona Lisa' . If you use this URI path: https://api.triplydb.com/datasets/DBpedia-association/dbpedia/describe.nt?resource=http%3A%2F%2Fdbpedia.org%2Fresource%2FMona_Lisa In your browser, the .nt document describing the 'Mona Lisa' instance will be automatically downloaded. You can then upload this file to a dataset and visualize it in a graph . Figure 1 illustrates the retrieved graph for the \u2018Mona Lisa\u2019 instance. The requested resource will be displayed in the center of the graph, forming an 'ego graph'. It will include all direct properties, as well as some indirect properties that are also pulled in by LD Browser. The labels for all classes and properties will be included for easy human-readable display. In addition, this API also supports traversing blank node-replacing well-known IRIs (CBD style), and limits the number of objects per subject/property to manage the description size. 
This corresponds to the \"Show more\" button in the LD Browser GUI, ensuring a manageable and user-friendly experience.","title":"LD Browser API"},{"location":"triply-api/#triple-pattern-fragments-tpf","text":"Triple Pattern Fragments (TPF) is a community standard that allows individual linked datasets to be queried for Triple Patterns (TP), a subset of the more complex SPARQL query language. The Triply API implements Triple Pattern Fragments version 2019-01-18 and Linked Data Fragments version 2016-06-05. The Triple Pattern Fragments (TPF) API is available for all datasets in Triply and does not require running a dedicated service.","title":"Triple Pattern Fragments (TPF)"},{"location":"triply-api/#uri-path","text":"TPF requests are sent to the following URI path: https://api.INSTANCE/datasets/ACCOUNT/DATASET/fragments","title":"URI path"},{"location":"triply-api/#reply-format","text":"Since TPF replies distinguish between data and metadata that are stored in different graphs, it is recommended to request the TriG content type with the following HTTP request header: Accept: application/trig","title":"Reply format"},{"location":"triply-api/#query-parameters","text":"Triple Pattern Fragments (TPF) uses the following query parameters in order to retrieve only those triples that adhere to a specified Triple Pattern: Key Value Purpose subject A URL-encoded IRI. Only return triples where the given IRI appears in the subject position. predicate A URL-encoded IRI. Only return triples where the given IRI appears in the predicate position. object A URL-encoded IRI or literal. Only return triples where the given IRI or literal appears in the object position.","title":"Query parameters"},{"location":"triply-api/#example-request","text":"curl -G \\ 'https://api.triplydb.com/datasets/academy/pokemon/fragments' \\ --data-urlencode 'predicate=http://www.w3.org/2000/01/rdf-schema#label' \\ -H 'Accept: application/trig'","title":"Example request"},{"location":"triply-api/#exporting-data","text":"To export the linked data, use the following path: https://api.INSTANCE/datasets/ACCOUNT/DATASET/download","title":"Exporting data"},{"location":"triply-api/#query-parameters_1","text":"By default, an export includes all linked data graphs. Use a query argument to specify a particular graph. Key Value Purpose graph A URL-encoded IRI. Only download the export of the given graph IRI. Therefore, to export the linked data of a graph, use the following path: https://api.INSTANCE/datasets/ACCOUNT/DATASET/download/?graph=GRAPH To find out which graphs are available, use the following path: https://api.INSTANCE/datasets/ACCOUNT/DATASET/graphs","title":"Query parameters"},{"location":"triply-api/#example-requests","text":"Export a dataset: curl 'https://api.triplydb.com/datasets/academy/pokemon/download' \\ -H 'Accept: application/trig' > exportDataset.trig.gz Export a graph: First, find out which graphs are available: curl 'https://api.triplydb.com/datasets/academy/pokemon/graphs' Then, download one of the graphs: curl 'https://api.triplydb.com/datasets/academy/pokemon/download?graph=https://triplydb.com/academy/pokemon/graphs/data' -H 'Accept: application/trig' > exportGraph.trig.gz","title":"Example requests"},{"location":"triply-api/#services","text":"Some API requests require the availability of a specific service over the dataset. 
These requests are directed towards a URI path of the following form: https://api.INSTANCE/datasets/ACCOUNT/DATASET/services/SERVICE/ Upper-case letter words must be replaced by the following values: SERVICE :: The name of a specific service that has been started for the corresponding dataset. See the previous section for Datasets to learn the meaning of INSTANCE , ACCOUNT , and DATASET . Here is an example of a URI path that points to a SPARQL endpoint over the Pok\u00e9mon dataset: https://api.triplydb.com/datasets/academy/pokemon/services/pokemon/ See the following sections for more information on how to query the endpoints provided by services: SPARQL Elasticsearch","title":"Services"},{"location":"triply-api/#create-a-service","text":"You can create a service for a dataset via the TriplyDB API. You need to use the API Token and send an HTTP POST request with data specifying: \"type\" and \"name\" . An example of such a request: curl -H 'Authorization: Bearer TOKEN' -H 'Content-Type: application/json' -X POST https://api.INSTANCE/datasets/ACCOUNT/DATASET/services/ -d '{\"type\": \"TYPE\", \"name\": \"NAME\"}' Upper-case letter words in the JSON after -d must be replaced by the following values: TYPE :: SPARQL ( virtuoso or jena ) or Elasticsearch NAME :: The name of the service","title":"Create a service"},{"location":"triply-api/#synchronize-a-service","text":"You can synchronize an existing service for a dataset via the TriplyDB API. You need to use the API Token and send an HTTP POST request with data: {\"sync\": \"true\"} An example of such a request: curl -H 'Authorization: Bearer TOKEN' -H 'Content-Type: application/json' -X POST https://api.INSTANCE/datasets/ACCOUNT/DATASET/services/SERVICE -d '{\"sync\": \"true\"}'","title":"Synchronize a service"},{"location":"triply-api/#sparql","text":"There are two service types in TriplyDB that expose the SPARQL 1.1 Query Language: \"Sparql\" and \"Jena\". The former works well for large quantities of instance data with a relatively small data model; the latter works well for smaller quantities of data with a richer data model. SPARQL services expose a generic endpoint URI at the following location (where ACCOUNT , DATASET and SERVICE are user-chosen names): https://api.triplydb.com/datasets/ACCOUNT/DATASET/services/SERVICE/sparql Everybody who has access to the dataset also has access to its services, including its SPARQL services: - For Public datasets, everybody on the Internet or Intranet can issue queries. - For Internal datasets, only users that are logged into the triple store can issue queries. - For Private datasets, only users that are logged into the triple store and are members of ACCOUNT can issue queries. Notice that for professional use it is easier and better to use saved queries . Saved queries have persistent URIs, descriptive metadata, versioning, and support for reliable large-scale pagination ( see how to use pagination with saved query API ). Still, if you do not have a saved query at your disposal and want to perform a custom SPARQL request against an accessible endpoint, you can do so. TriplyDB implements the SPARQL 1.1 Query Protocol standard for this purpose.","title":"SPARQL"},{"location":"triply-api/#sending-a-sparql-query-request","text":"According to the SPARQL 1.1 Protocol, queries can be sent in three different ways, as displayed in Table 1 . For small query strings it is possible to send an HTTP GET request (row 1 in Table 1 ). A benefit of this approach is that all information is stored in one URI. 
For public data, copy/pasting this URI in a web browser runs the query. For larger query strings it is required to send an HTTP POST request (rows 2 and 3 in Table 1 ). The reason for this is that longer query strings result in longer URIs when following the HTTP GET approach. Some applications do not support longer URIs, or they even silently truncate them resulting in an error down the line. The direct POST approach (row 3 in Table 1 ) is the best of these 3 variants, since it most clearly communicates that it is sending a SPARQL query request (see the Content-Type column). HTTP Method Query String Parameters Request Content-Type Request Message Body query via GET GET query (exactly 1) default-graph-uri (0 or more) named-graph-uri (0 or more) none none query via URL-encoded POST POST none application/x-www-form-urlencoded URL-encoded, ampersand-separated query parameters. query (exactly 1) default-graph-uri (0 or more) named-graph-uri (0 or more) query via POST directly POST default-graph-uri (0 or more) named-graph-uri (0 or more) application/sparql-query Unencoded SPARQL query string Table 1 - Overview of the three different ways in which SPARQL queries can be issues over HTTP.","title":"Sending a SPARQL Query request"},{"location":"triply-api/#sparql-query-result-formats","text":"SPARQL services are able to return results in different formats. The user can specify the preferred format by specifying the corresponding Media Type in the HTTP Accept header. TriplyDB supports the Media Types in the following table. Notice that the chosen result format must be supported for your query form. Alternatively, it is possible (but not preferred) to specify the requested format as an URI path suffix; see the GET request section for an example. Result format Media Type Query forms Suffix CSV text/csv Select .csv JSON application/json Ask, Select .json JSON-LD application/ld+json Construct, Describe .jsonld N-Quads application/n-quads Construct, Describe .nq N-Triples application/n-triples Construct, Describe .nt RDF/XML application/rdf+xml Construct, Describe SPARQL JSON application/sparql-results+json Ask, Select .srj SPARQL XML application/sparql-results+xml Ask, Select .srx TriG application/trig Construct, Describe .trig TSV text/tab-separated-values Select .tsv Turtle text/turtle Construct, Describe .ttl","title":"SPARQL Query result formats"},{"location":"triply-api/#examples-of-sparql-query-requests","text":"This section contains examples of SPARQL HTTP requests. The requests run either of the following two SPARQL queries against a public SPARQL endpoint that contains data about Pokemon: select * { ?s ?p ?o. } limit 1 construct where { ?s ?p ?o. } limit 1 The examples made use of the popular command-line tool cURL . These examples should also work in any other HTTP client tool or library.","title":"Examples of SPARQL Query requests"},{"location":"triply-api/#get-request","text":"curl https://api.triplydb.com/datasets/academy/pokemon/services/pokemon/sparql?query=select%20%2A%20%7B%20%3Fs%20%3Fp%20%3Fo.%20%7D%20limit%201 Result: [ { \"s\": \"https://triplydb.com/academy/pokemon/vocab/\", \"p\": \"http://www.w3.org/1999/02/22-rdf-syntax-ns#type\", \"o\": \"http://www.w3.org/2002/07/owl#Ontology\" } ] The following request is identical to the previous one, but adds the \".srj\" suffix to the URI path (see /sparql.srj ). All suffixes from the table in Section SPARQL Query result formats are supported. 
curl https://api.triplydb.com/datasets/academy/pokemon/services/pokemon/sparql.srj?query=select%20%2A%20%7B%20%3Fs%20%3Fp%20%3Fo.%20%7D%20limit%201 This returns the official SPARQL Result Set JSON (SRJ) format. Notice that this official format is more verbose than the standard JSON format: { \"head\": { \"link\": [], \"vars\": [ \"s\", \"p\", \"o\" ] }, \"results\": { \"bindings\": [ { \"s\": { \"type\": \"uri\", \"value\": \"https://triplydb.com/academy/pokemon/\" }, \"p\": { \"type\": \"uri\", \"value\": \"http://www.w3.org/1999/02/22-rdf-syntax-ns#type\" }, \"o\": { \"type\": \"uri\", \"value\": \"http://rdfs.org/ns/void#Dataset\" } } ] } }","title":"GET request"},{"location":"triply-api/#url-encoded-post-request","text":"curl -X POST https://api.triplydb.com/datasets/academy/pokemon/services/pokemon/sparql \\ -H 'Content-Type: application/x-www-form-urlencoded' \\ --data query=select%20%2A%20%7B%20%3Fs%20%3Fp%20%3Fo.%20%7D%20limit%201 Result: [ { \"s\": \"https://triplydb.com/academy/pokemon/vocab/\", \"p\": \"http://www.w3.org/1999/02/22-rdf-syntax-ns#type\", \"o\": \"http://www.w3.org/2002/07/owl#Ontology\" } ]","title":"URL-encoded POST request"},{"location":"triply-api/#direct-post-request","text":"curl -X POST https://api.triplydb.com/datasets/academy/pokemon/services/pokemon/sparql \\ -H 'Content-Type: application/sparql-query' \\ -d 'select * { ?s ?p ?o } limit 1' Result: [ { \"s\": \"https://triplydb.com/academy/pokemon/vocab/\", \"p\": \"http://www.w3.org/1999/02/22-rdf-syntax-ns#type\", \"o\": \"http://www.w3.org/2002/07/owl#Ontology\" } ]","title":"Direct POST request"},{"location":"triply-api/#sparql-json","text":"Like the previous example, but with an Accept header that specifies Media Type application/sparql-results+json : curl -X POST https://api.triplydb.com/datasets/academy/pokemon/services/pokemon/sparql \\ -H 'Accept: application/sparql-results+json' \\ -H 'Content-Type: application/sparql-query' \\ -d 'select * { ?s ?p ?o } limit 1' Result: { \"head\": { \"vars\": [\"s\", \"p\", \"o\"] }, \"results\": { \"bindings\": [ { \"s\": { \"type\": \"uri\", \"value\": \"https://triplydb.com/academy/pokemon/vocab/\" }, \"p\": { \"type\": \"uri\", \"value\": \"http://www.w3.org/1999/02/22-rdf-syntax-ns#type\" }, \"o\": { \"type\": \"uri\", \"value\": \"http://www.w3.org/2002/07/owl#Ontology\" } } ] } }","title":"SPARQL JSON"},{"location":"triply-api/#sparql-xml","text":"Like the previous example, but with Media Type application/sparql-results+xml in the Accept header: curl -X POST https://api.triplydb.com/datasets/academy/pokemon/services/pokemon/sparql \\ -H 'Accept: application/sparql-results+xml' \\ -H 'Content-Type: application/sparql-query' \\ -d 'select * { ?s ?p ?o } limit 1' Result: https://triplydb.com/academy/pokemon/vocab/ http://www.w3.org/1999/02/22-rdf-syntax-ns#type http://www.w3.org/2002/07/owl#Ontology ","title":"SPARQL XML"},{"location":"triply-api/#sparql-tab-separated-values","text":"Like the previous examples, but with Media Type text/tab-separated-values in the Accept header: curl -X POST https://api.triplydb.com/datasets/academy/pokemon/services/pokemon/sparql \\ -H 'Accept: text/tab-separated-values' \\ -H 'Content-Type: application/sparql-query' \\ -d 'select * { ?s ?p ?o } limit 1' \"s\" \"p\" \"o\" \"https://triplydb.com/academy/pokemon/vocab/\" \"http://www.w3.org/1999/02/22-rdf-syntax-ns#type\" \"http://www.w3.org/2002/07/owl#Ontology\"","title":"SPARQL tab-separated values"},{"location":"triply-api/#sparql-comma-separated-values","text":"Like the 
previous examples, but with Media Type text/csv in the Accept header: curl -X POST https://api.triplydb.com/datasets/academy/pokemon/services/pokemon/sparql \\ -H 'Accept: text/csv' \\ -H 'Content-Type: application/sparql-query' \\ -d 'select * { ?s ?p ?o } limit 1' Result: \"s\",\"p\",\"o\" \"https://triplydb.com/academy/pokemon/vocab/\",\"http://www.w3.org/1999/02/22-rdf-syntax-ns#type\",\"http://www.w3.org/2002/07/owl#Ontology\"","title":"SPARQL comma-separated values"},{"location":"triply-api/#json-ld","text":"Like the previous examples, but with a SPARQL construct query and Media Type application/ld+json in the Accept header: curl -X POST https://api.triplydb.com/datasets/academy/pokemon/services/pokemon/sparql \\ -H 'Accept: application/ld+json' \\ -H 'Content-Type: application/sparql-query' \\ -d 'construct where { ?s ?p ?o } limit 1' Result: { \"@graph\": [ { \"@id\": \"https://triplydb.com/academy/pokemon/vocab/\", \"@type\": \"http://www.w3.org/2002/07/owl#Ontology\" } ] }","title":"JSON-LD"},{"location":"triply-api/#n-quads","text":"Like the previous examples, but with Media Type application/n-quads in the Accept header: curl -X POST https://api.triplydb.com/datasets/academy/pokemon/services/pokemon/sparql \\ -H 'Accept: application/n-quads' \\ -H 'Content-Type: application/sparql-query' \\ -d 'construct where { ?s ?p ?o } limit 1' Result: <https://triplydb.com/academy/pokemon/vocab/> <http://www.w3.org/1999/02/22-rdf-syntax-ns#type> <http://www.w3.org/2002/07/owl#Ontology> .","title":"N-Quads"},{"location":"triply-api/#n-triples","text":"Like the previous examples, but with Media Type application/n-triples in the Accept header: curl -X POST https://api.triplydb.com/datasets/academy/pokemon/services/pokemon/sparql \\ -H 'Accept: application/n-triples' \\ -H 'Content-Type: application/sparql-query' \\ -d 'construct where { ?s ?p ?o } limit 1' Result: <https://triplydb.com/academy/pokemon/vocab/> <http://www.w3.org/1999/02/22-rdf-syntax-ns#type> <http://www.w3.org/2002/07/owl#Ontology> .","title":"N-Triples"},{"location":"triply-api/#trig","text":"Like the previous examples, but with Media Type application/trig in the Accept header: curl -X POST https://api.triplydb.com/datasets/academy/pokemon/services/pokemon/sparql \\ -H 'Accept: application/trig' \\ -H 'Content-Type: application/sparql-query' \\ -d 'construct where { ?s ?p ?o } limit 1' Result: @prefix rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#> . @prefix owl: <http://www.w3.org/2002/07/owl#> . <https://triplydb.com/academy/pokemon/vocab/> rdf:type owl:Ontology .","title":"TriG"},{"location":"triply-api/#turtle","text":"Like the previous examples, but with Media Type text/turtle in the Accept header: curl -X POST https://api.triplydb.com/datasets/academy/pokemon/services/pokemon/sparql \\ -H 'Accept: text/turtle' \\ -H 'Content-Type: application/sparql-query' \\ -d 'construct where { ?s ?p ?o } limit 1' Result: @prefix rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#> . @prefix owl: <http://www.w3.org/2002/07/owl#> . <https://triplydb.com/academy/pokemon/vocab/> rdf:type owl:Ontology .","title":"Turtle"},{"location":"triply-api/#graphql","text":"Some TriplyDB instances publish a GraphQL endpoint for every dataset. This endpoint can be used for GraphQL queries. It uses information from user-provided SHACL shapes to generate the GraphQL schema. 
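A successful GraphQL request returns a JSON object with a top-level \"data\" key (and, if something went wrong, an \"errors\" key), as prescribed by the GraphQL specification. As a minimal sketch (the label value is illustrative, and the exact fields depend on the schema generated from your SHACL shapes), the CapitalConnection query from the Example section could return a response of the following shape: { \"data\": { \"CapitalConnection\": { \"edges\": [ { \"node\": { \"label\": \"Amsterdam\" } } ] } } } 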
See more information about this subject here .","title":"GraphQL"},{"location":"triply-api/#uri-path_1","text":"GraphQL requests are sent to the following URI path: https://api.INSTANCE/datasets/ACCOUNT/DATASET/graphql","title":"URI path"},{"location":"triply-api/#requests-and-response","text":"The format of requests and their corresponding responses is described at graphql.org","title":"Requests and Response"},{"location":"triply-api/#example","text":"Perform a query using the following request body: { \"query\": \"{ CapitalConnection { edges { node { label } } } }\" } This request is issued in the following way with the cURL command-line tool: curl -X POST https://api.triplydb.com/datasets/iish/cshapes/graphql \\ -d '{ \"query\":\"{ CapitalConnection { edges { node { label } } } }\"}' \\ -H \"Content-Type: application/json\"","title":"Example"},{"location":"triply-api/#elasticsearch","text":"The text search API returns a list of linked data entities based on a supplied text string. The text string is matched against the text in literals and IRIs that appear in the linked data description of the returned entities. The text search API is only available for a dataset after an Elasticsearch service has been created for that dataset. Two types of searches can be performed: a simple search, and a custom search. Simple searches require one search term for a fuzzy match. Custom searches accept a JSON object conforming to the Elasticsearch query DSL .","title":"Elasticsearch"},{"location":"triply-api/#uri-path_2","text":"Text search requests are sent to the following URI path: https://api.INSTANCE/datasets/ACCOUNT/DATASET/services/SERVICE/search","title":"URI path"},{"location":"triply-api/#reply-format_1","text":"The reply format is a JSON object. Search results are returned in the JSON array that is stored under the key sequence \"hits\"/\"hits\" . The order in which search results appear in the array is meaningful: better matches appear earlier. Every search result is represented by a JSON object. The name of the linked data entity is specified under the key \"_id\" . Properties of the linked data entity are stored as IRI keys. The values of these properties appear in a JSON array in order to allow more than one object term per predicate term (as is often the case in linked data). The following code snippet shows part of the reply for the example request below. The reply includes two results for the search string \u201cmew\u201d, returning the Pok\u00e9mon Mew (higher ranked result) and Mewtwo (lower ranked result). 
{ \"hits\": { \"hits\": [ { \"_id\": \"https://triply.cc/academy/pokemon/id/pokemon/mew\", \"http://open vocab org/terms/canonicalUri\": [ \"http://pokedex.dataincubator.org/pokemon/151\" ], \"https://triply cc/academy/pokemon/def/baseAttack\": [ 100 ], \"https://triply cc/academy/pokemon/def/name\": [ \"MEW\", \"MEW\", \"MEW\", \"MEW\", \"MEW\", \"\u30df\u30e5\u30a6\" ], \u2026 }, { \"_id\": \"https://triply.cc/academy/pokemon/id/pokemon/mewtwo\", \"http://open vocab org/terms/canonicalUri\": [ \"http://pokedex.dataincubator.org/pokemon/150\" ], \"https://triply cc/academy/pokemon/def/baseAttack\": [ 110 ], \"https://triply cc/academy/pokemon/def/name\": [ \"MEWTU\", \"MEWTWO\", \"MEWTWO\", \"MEWTWO\", \"MEWTWO\", \"\u30df\u30e5\u30a6\u30c4\u30fc\" ], \u2026 } ] }, \u2026 }","title":"Reply format"},{"location":"triply-api/#examples","text":"","title":"Examples"},{"location":"triply-api/#simple-search","text":"Perform a search for the string mew : curl 'https://api.triplydb.com/datasets/academy/pokemon/services/search/search?query=mew'","title":"Simple search"},{"location":"triply-api/#custom-search","text":"Perform a search using the custom query: { \"query\": { \"simple_query_string\": { \"query\": \"pikachu\" } } } This request is issued in the following way with the cURL command-line tool: curl -X POST 'https://api.triplydb.com/datasets/academy/pokemon/services/search/search' \\ -d '{\"query\":{\"simple_query_string\":{\"query\":\"pikachu\"}}}' \\ -H 'content-type: application/json'","title":"Custom search"},{"location":"triply-api/#count-api","text":"Elasticsearch allows the number of results to be determined without having to actually retrieve all these results. This is done with the \"Count API\". This API comes in handy when the number of results is shown in applications such as faceted search interfaces. The following two requests return the number of results for the search strings \"Iris\" and \"Setosa\". Notice that \"Iris\" occurs more often (184 times) than \"Setosa\" (52 times): curl 'https://api.triplydb.com/datasets/Triply/iris/services/iris-es/_count' -H 'Content-Type: application/json' --data-raw $'{\"query\": { \"simple_query_string\": { \"query\": \"Iris\" } } }' {\"count\":184,\"_shards\":{\"total\":1,\"successful\":1,\"skipped\":0,\"failed\":0}} and: curl 'https://api.triplydb.com/datasets/Triply/iris/services/iris-es/_count' -H 'Content-Type: application/json' --data-raw $'{\"query\": { \"simple_query_string\": { \"query\": \"Setosa\" } } }' {\"count\":52,\"_shards\":{\"total\":1,\"successful\":1,\"skipped\":0,\"failed\":0}}","title":"Count API"},{"location":"triply-api/#setting-up-index-templates-for-elasticsearch","text":"TriplyDB allows you to configure a custom mapping for Elasticsearch services in TriplyDB using index templates.","title":"Setting up index templates for ElasticSearch"},{"location":"triply-api/#index-templates","text":"Index templates make it possible to create indices with user defined configuration, which an index can then pull from. A template will be defined with a name pattern and some configuration in it. If the name of the index matches the template\u2019s naming pattern, the new index will be created with the configuration defined in the template. Official documentation from ElasticSearch on how to use Index templates can be found here . Index templates on TriplyDB can be configured through either TriplyDB API or TriplyDB-JS . 
Index template can be created by making a POST request to the following URL: https://api.INSTANCE/datasets/ACCOUNT/DATASET/services/ with this body: { \"type\": \"elasticSearch\", \"name\": \"SERVICE_NAME\", \"config\": { \"indexTemplates\": [ { \"index_patterns\": \"index\", \"name\": \"TEMPLATE_NAME\", ... } ] } } index_patterns and name are obligatory fields to include in the body of index template. It's important that every index template has the field \"index_patterns\" equal \"index\" ! Below is the example of the post request: curl -H \"Authorization: Bearer TRIPLYDB_TOKEN\" -H \"Content-Type: application/json\" -d '{\"type\":\"elasticSearch\",\"name\":\"SERVICE_NAME\",\"config\":{\"indexTemplates\":[{\"index_patterns\":\"index\", \"name\": \"TEMPLATE_NAME\"}]}}' -X POST \"https://api.INSTANCE/datasets/ACCOUNT/DATASET/services/\"","title":"Index templates"},{"location":"triply-api/#component-templates","text":"Component templates are building blocks for constructing index templates that specify index mappings, settings, and aliases. You can find the official documentation on their use in ElasticSearch here . They can be configured through either TriplyDB API or TriplyDB-JS . A component template can be created by making a POST request to the following URL: https://api.INSTANCE/datasets/ACCOUNT/DATASET/services/ with this body: { \"type\": \"elasticSearch\", \"name\": \"SERVICE_NAME\", \"config\": { \"componentTemplates\": [ { \"name\": \"TEMPLATE_NAME\", \"template\": { \"mappings\": { \"properties\": { ... } } } ... } ] } } name and template are obligatory fields to include in the body of component template. Component template can only be created together with an index template. In this case Index template needs to contain the field composed_of with the name of the component template. Below is an example of a POST request to create a component template for the property https://schema.org/dateCreated to be of type date . curl -H \"Authorization: Bearer TRIPLYDB_TOKEN\" -H \"Content-Type: application/json\" -d '{\"type\":\"elasticSearch\",\"name\":\"SERVICE_NAME\",\"config\":{\"indexTemplates\":[{\"index_patterns\":\"index\", \"name\": \"INDEX_TEMPLATE_NAME\",\"composed_of\":[\"COMPONENT_TEMPLATE_NAME\"]}], \"componentTemplates\":[{\"name\":\"COMPONENT_TEMPLATE_NAME\",\"template\":{\"mappings\":{\"properties\":{\"https://schema org/dateCreated\":{\"type\":\"date\"}}}}}]}}' -X POST \"https://api.INSTANCE/datasets/ACCOUNT/DATASET/services/\"","title":"Component templates"},{"location":"triply-cli/","text":"On this page: TriplyDB Command-line Interface (CLI) TriplyDB Command-line Interface (CLI) \u00b6 The TriplyDB Command-line Interface (CLI) offers a more convenient way to upload files and assets to a TriplyDB instance. The latest version of the CLI for the respective OS can be found here: Linux Windows MacOS For more information about how to use the CLI, execute it with the --help argument. Please contact support@triply.cc for more information.","title":"Command-line Interface"},{"location":"triply-cli/#triplydb-command-line-interface-cli","text":"The TriplyDB Command-line Interface (CLI) offers a more convenient way to upload files and assets to a TriplyDB instance. The latest version of the CLI for the respective OS can be found here: Linux Windows MacOS For more information about how to use the CLI, execute it with the --help argument. 
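For example, assuming the downloaded binary has been saved as triplydb (the actual file name depends on your download and operating system), you can make it executable and print the built-in help as follows: chmod +x ./triplydb ./triplydb --help 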
Please contact support@triply.cc for more information.","title":"TriplyDB Command-line Interface (CLI)"},{"location":"triply-db-getting-started/","text":"On this page: TriplyDB Overview TriplyDB Overview \u00b6 TriplyDB allows you to store, share, and use linked data knowledge graphs. TriplyDB makes it easy to upload linked data and expose it through various APIs, including SPARQL, GraphQL, Elasticsearch, Linked Data Fragments, and REST. Learn more about the following features: Uploading data Sharing data Viewing data Exporting data Saved queries Data stories Admin settings","title":"Overview"},{"location":"triply-db-getting-started/#triplydb-overview","text":"TriplyDB allows you to store, share, and use linked data knowledge graphs. TriplyDB makes it easy to upload linked data and expose it through various APIs, including SPARQL, GraphQL, Elasticsearch, Linked Data Fragments, and REST. Learn more about the following features: Uploading data Sharing data Viewing data Exporting data Saved queries Data stories Admin settings","title":"TriplyDB Overview"},{"location":"triply-db-getting-started/admin-settings-pages/","text":"On this page: Admin settings Pages Overview page General overview Accounts overview Data overview Services overview Settings page Set logos and banner Setting metadata Setting contact email Setting example datasets Setting Starter dataset Setting Authentication Setting Site-wide prefixes Account overview page Add new user(s) Create a new user Datasets page Services page Redirects page How to setup a redirects for dereferencing Admin settings Pages \u00b6 You can use the console to perform administrator tasks. The administrator tasks are performed within the admin settings page. The admin settings pages are accessible by clicking on the user menu in the top-right corner and selecting the \u201cAdmin settings\u201d menu item. You must have administrator privileges to access these pages and perform administrator tasks. Overview page \u00b6 The first page that comes into view when opening the admin settings pages is the overview page. This page contains an overview of all the important statistics of the instance. The page also shows how close the instance is to hitting one or more limits. If no limit is set, the statistics are shown as a counter. If a limit is set a gauge is shown with a green, orange or red bar. The colors denote how far that statistic of the instance is to the limit. Green means not close to the limit, Orange means close to the limit, Red means over the limit. General overview \u00b6 The general overview gives an insight into the software version of the instance. Each instance consists of a console and an API. The console is the web interface of the instance and has a build date corresponding to the build date of the docker image of the console and a version number corresponding to the version of the docker image. The API is the layer between the console and the data. The API is separate from the console and is a different docker image. The API also has a version and build date of the docker image. Also contains a starting time, and an updated time, the moments when the docker image is started for this instance or when the docker image is updated for the instance. Accounts overview \u00b6 The accounts overview shows how many organizations and users are in this instance. The organizations and users are shown in a counter if no limit is set. If a limit is set on the number of organizations and/or users of the instance a gauge is shown. 
Data overview \u00b6 The data overview shows multiple statistics about datasets. The first counter shows the amount of datasets on the instance. The second and third counters show the amount of graphs and statements in all graphs. The fourth and fifth counters show the amount of unique graphs and statements. When a graph is copied from one dataset to another, the data in that graph does not change. The amount of unique data does not change either. The amount of unique data is a more representative way of calculating the amount of data in the instance. All statistics are shown as a counter if no limit is set. If a limit is set on one of the statistics of the instance, a gauge is shown. Services overview \u00b6 The services overview shows multiple statistics about services. The first counter shows the total amount of services on the instance. The second counter shows the total amount of statements in all the services. Then, for each of our service types, a specific counter is created, containing the amount of services and the amount of statements in services of that type. All statistics are shown as a counter if no limit is set. If a limit is set on one of the statistics of the instance, a gauge is shown. Settings page \u00b6 The settings page is the main page for administrators to institute instance-wide changes. An administrator can change the site logos here, change the contact email, or update site-wide prefixes. Set logos and banner \u00b6 For changing the logos and the banner follow the next steps: Click on the \u201cAdmin settings\u201d link in the user menu (top-right corner) and click the \"Settings\" tab. This brings up an overview of all the settings an admin can set for the TriplyDB instance. Under \"Site logos\" and \"Site banner\" you can upload a site logo (square and landscape) or a banner. The logo (preferably square) is required to be SVG. In the UI the image is displayed at 30x30px. The banner can be of any format, although WEBP is preferred. The image's resolution should be between 1920x500 and 4000x500. The banner is displayed at a height of 500px. If the image is smaller than the browser screen size, the image will be stretched. Make sure you use files with a maximum size of 5 MB. Setting metadata \u00b6 For changing the metadata follow the next steps: 1. Click on the \u201cAdmin settings\u201d link in the user menu (top-right corner) and click the \"Settings\" tab. This brings up an overview of all the settings an admin can set for the TriplyDB instance. On that page navigate to \"Site metadata\", it looks as follows: Here you can set the name, tag line, description and welcome text. The name of your website appears in your browser tab. The welcome text appears on the homepage of your TriplyDB instance. The tagline and description are for metadata purposes (e.g. findability and website previews). Setting contact email \u00b6 For changing the contact email follow the next steps: 1. Click on the \u201cAdmin settings\u201d link in the user menu (top-right corner) and click the \"Settings\" tab. This brings up an overview of all the settings an admin can set for the TriplyDB instance. On that page navigate to \"Contact Email\". Here, you can set a new contact email for the instance. Setting example datasets \u00b6 Example datasets are introductory datasets shown on the frontpage of your instance. They are datasets that are interesting for people that visit your page to see and interact with. 
Most often you'll use open datasets to show them off on the frontpage. You can also use internal or private datasets, but these will only be visible if the person seeing them has the right access rights. For editing the example datasets follow the next steps: 1. Click on the \u201cAdmin settings\u201d link in the user menu (top-right corner) and click the \"Settings\" tab. This brings up an overview of all the settings an admin can set for the TriplyDB instance. On that page, navigate to \"Example datasets\". Here, you can execute the following changes: You can move datasets up and down in the order by clicking and holding your left mouse button over the three horizontal lines in front of the dataset name. You can then drag the selected dataset to their new spot. In the search field below the already added datasets you can add a new example dataset by typing in the search field and selecting the correct dataset. You can remove datasets by pressing the x on the right side of the dataset name to remove it from the example dataset list. Setting Starter dataset \u00b6 The starter dataset is a beginner-friendly linked dataset that can be an introduction into linked data when a user creates an account for the first time. The starter dataset is visible for a user when the user has not yet created a dataset on its own account. For editing the example datasets follow the next steps: 1. Click on the \u201cAdmin settings\u201d link in the user menu (top-right corner) and click the \"Settings\" tab. This brings up an overview of all the settings an admin can set for the TriplyDB instance. On that page navigate to \"Starter dataset\". Here you can change the starter dataset to a new starter dataset for the instance by typing in the search bar a name of an existing dataset to replace the started dataset. This dataset then will be presented to users on their account page, with an option to import(copy) them immediately. This needs to be a public dataset! If it's not public, new users will have to create a dataset. The starter dataset is only shown if the user currently has no datasets. Setting Authentication \u00b6 One of the roles of an administrator is to make sure only the right people will sign up for the TriplyDB instance. To do this, an administrator can set up authentication protocols. The authentication protocols can block people from signing up to instances where they are not allowed to sign up to. For changing the authentication protocols follow the next steps: 1. Click on the \u201cAdmin settings\u201d link in the user menu (top-right corner) and click the \"Settings\" tab. This brings up an overview of all the settings an admin can set for the TriplyDB instance. On that page navigate to \"Authentication\". Now you can change the password sign up. Allowing people to only register with a password or they are only allowed to register with a google or Github account. When password signup is enabled, the administrator can also set the permitted signup domains. Only users with e-mail addresses that match these domains are allowed to sign-up. Wildcards are allowed and domains are comma separated, for example: mydomain.com,*.mydomain.com. Setting Site-wide prefixes \u00b6 One of the advantages of using TriplyDB is that you can set site-wide prefixes once and use them everywhere on the instance. Site-wide prefixes are prefixes defined in the admin settings and can be used for all datasets that contain the IRIs matching the prefixes. For editing the side-wide prefixes follow the next steps: 1. 
Click on the \u201cAdmin settings\u201d link in the user menu (top-right corner) and click the \"Settings\" tab. This brings up an overview of all the settings an admin can set for the TriplyDB instance. On that page navigate to \"Site-wide prefixes\". Here, you can execute the following changes: Each field of the already added site-wide prefixes you can edit. You can edit the prefix label by typing in the first field. You can edit the prefix IRI and in the second field. Pressing UPDATE PREFIXES updates the list. In the last field below the already added site-wide prefixes you can add a new site-wide prefix by typing in the first field the prefix label, and in the second field the prefix IRI. Pressing UPDATE PREFIXES updates the list. You can remove prefixes by pressing the x on the right side of the prefixes name to remove it from the site-wide prefixes list. Account overview page \u00b6 The account page governs all the accounts of an instance. The paginated table shows all the accounts of the instance. The table is equipped with filters and sorting mechanisms to quickly search for and locate specific accounts according to search and filter criteria. The table also has a search field at the right side to quickly do wildcard searching. The table sorts all accounts automatically on the created at date with the latest created at date accounts first. The filters on top of the table can be used to filter the following columns: Name The name of the account, you can order the table based on the alphanumeric ordering, and filter based on the sequence of characters appearing in the filter. The name is also a URL that brings you to the location of the account. Type Type of the account, this can either be 'Organization' or 'User'. In the filter you can select a specific account type or 'All' account types. Display name The display name of the account, often an account has both a name and a display name. The display name is not limited to a specific set of characters, as it is not used as an URL. You can order the table based on the alphanumeric ordering, and filter based on the sequence of characters appearing in the filter. Email The email address of the account. You can order the table based on the alphanumeric ordering, and filter based on the sequence of characters appearing in the filter. Created at How long ago an account was created. When you hover over the text you can see the precise moment an account is created. You can order accounts based on the moment of creation. Updated at How long ago an account has been updated with new metadata such as display name or password. When you hover over the text you can see the precise moment an account is last updated. You can order accounts based on the moment of updated at time. Last activity How long ago the account has been last active. When you hover over the text you can see the precise moment an account was last active. You can order the accounts based on the moment of last time the account was active. Role Role of the account, this can either be 'light', 'regular' or 'administrator'. In the filter you can select a specific role or 'All' roles. Verified An account can be verified or not, to verify an account, the user needs to click on the verify button in the email. Or an administrator has verified the account in the account settings of that account. Only 'users' need to be verified. Disabled An account can be disabled or not, to disabled an account, the user needs to click on the disabled button in their user settings. 
Or an administrator has disabled the account in the account settings of that account. legal consent An account can have accepted the legal consent or not, to accept legal consent, the user needs to click on the accept legal consent either when creating an account or by checking it in the user settings. Only 'users' need to have accepted legal consent. For each account you can execute the following actions: Open account settings For each account, there is a button such that the administrator can directly go to the account settings of the user or organization. The account settings are behind the `cogwheel` button. Add new user(s) \u00b6 Go to the \u201cAccounts tab\u201d to receive an overview of all accounts on the TriplyDB instance. The type of account can be observed based on the following icons: Icon Account type organization user Create a new user \u00b6 New users can only be created by administrators by performing the following steps: Click on the \u201cAdmin settings\u201d link in the user menu (top-right corner) and click the \u201cAccounts\u201d tab. This brings up an overview of all users and organizations on the TriplyDB instance. Click the \u201cAdd user\u201d button. Fill in the user name and email address of the prospective user. The user name must consist of alphanumeric characters ( A-Za-z ) and hyphens ( - ). Click the \u201cAdd user\u201d button. This sends an account creation email to the prospective user, containing a link that allows them to log in. In addition to the above default procedure, the following two options are provided for user account creation: Temporary account : By default, user accounts do not expire. Sometimes it is useful to create a temporary account by specifying a concrete date in the \u201cAccount expiration date\u201d widget. Preset password : By default, a user can set her password after logging in for the first time by clicking on the link in the account creation email. When a password is entered in the \u201cPassword\u201d field, the user must enter this password in order to log in for the first time. Datasets page \u00b6 The account page governs all the datasets of an instance. The paginated table shows all the datasets of the instance. The table is equipped with filters and sorting mechanisms to quickly search for and locate specific datasets according to search and filter criteria. The table also has a search field at the right side to quickly do wildcard searching. The table sorts all datasets automatically on the created at date with the latest created at date datasets first. The filters on top of the table can be used to filter the following columns: Name The name of the dataset, you can order the table based on the alphanumeric ordering, and filter based on the sequence of characters appearing in the filter. The name is also a URL that brings you to the location of the dataset. Access level Access level of the dataset, this can either be 'Public', 'Internal' or 'Private'. In the filter you can select a specific access level or 'All' access levels. Display name The display name of the dataset, often a dataset has both a name and a display name. The display name is not limited to a specific set of characters, as it is not used as an URL. You can order the table based on the alphanumeric ordering, and filter based on the sequence of characters appearing in the filter. Owner The owner of the dataset. The owner is a URL and brings you to the overview page of the owner. 
The owners can be filtered based on the sequence of characters entered in the filter. Graph count The number of graphs in a dataset. This is the total number of graphs in the dataset, and can be filtered with the slider. Statement count The number of statements in a dataset. These are all the statements of all the graphs combined, and can be filtered with the slider. Service count The number of services in a dataset. These can be filtered with the slider. Asset count The number of assets in a dataset. These can be filtered with the slider. Created at How long ago the dataset was created. When you hover over the text you can see the precise moment the dataset was created. You can order datasets by their moment of creation. Updated at How long ago the dataset was last updated with new metadata, such as a display name, or with new data. When you hover over the text you can see the precise moment the dataset was last updated. You can order datasets by the moment they were last updated. Last graph edit How long ago the last graph was edited: new data was uploaded or removed, or graph names were changed. When you hover over the text you can see the precise moment the dataset was last edited. You can order datasets by the moment their graphs were last edited. For each dataset you can execute the following actions: Open dataset settings For each dataset there is a button with which the administrator can go directly to the dataset settings of that dataset. The dataset settings are behind the `cogwheel` button. Services page \u00b6 The services page governs all the services of an instance. The paginated table shows all the services of the instance. The table is equipped with filters and sorting mechanisms to quickly search for and locate specific services according to search and filter criteria. The table also has a search field on the right side for quick wildcard searching. The table automatically sorts services by whether or not they are in an error state: all services that are in an error state are shown at the top of the table, so that immediate action can be taken to check the service. The filters on top of the table can be used to filter the following columns: Name The name of the service. You can order the table alphanumerically, and filter based on the sequence of characters entered in the filter. The name is also a URL that brings you to the location of the service. Type The type of the service; this can either be 'Virtuoso', 'Jena', 'Blazegraph', 'Prolog' or 'Elasticsearch'. In the filter you can select a specific service type or 'All' service types. Status The status of the service; this can be 'Starting', 'Running', 'Stopped', 'Updating' or 'Error'. In the filter you can select a specific service status or 'All' service statuses. Statements The number of statements in a service. These are all the statements loaded into the service, and can be filtered with the slider. Loaded graphs The number of graphs loaded into the service. All the statements of all the graphs together add up to the total number of statements. Dataset The dataset the service belongs to. The dataset is clickable and brings you to the dataset page. The datasets can be filtered based on the sequence of characters entered in the filter. Owner The owner of the dataset is also the owner of the service. The owner is a URL and brings you to the overview page of the owner.
Created How long ago the service was created. When you hover over the text you can see the precise moment the service was created. You can order services by their moment of creation. Last queried How long ago the service was last queried. When you hover over the text you can see the precise moment the service was last queried. You can order services by the moment they were last queried. Auto stops Some services are equipped with an auto-stop feature. This feature reduces memory usage when a service has not been queried for a while. The column `Auto stops` shows how long it will take before a service is auto-stopped. You can order services by when the auto-stop feature kicks in. Each time a service is used, the timer is reset. Version A service always has a particular version. A service is not updated automatically, because an update could cause downtime. The owner of the service can update the service when they deem it necessary to move to the latest version. For each service you can execute the following actions: Update the service When a service can be updated, an orange arrow appears just below the service. When you press the update service button, the service is automatically updated to the latest service version. Open additional information For each service there is additional information available. The additional information is behind the `i` button. It contains information about the graphs in the dataset and a raw view of the service metadata. Inspect the logs For each service there is a log available. The logs are behind the `text` button. The logs contain information about the running service. Synchronize the service A service can become outdated. This happens when the data in the dataset no longer corresponds with the data in the service. When this happens, the service can be synchronized from here to bring it up to date with the latest version of the data. Remove the service When a service is no longer necessary, or space needs to be freed on the instance, the service can be removed from here. Some of these actions can be cumbersome when you need to perform them one at a time. To help with this, you can click the tickbox on the left side of the table. This selects all services that match the search criteria, or all services when there are no search criteria. You can then remove all selected services, or update all selected services to a new software version. Redirects page \u00b6 The great thing about linked data is that IRIs are used to identify objects. When you visit an IRI, you find useful information about that object. But sometimes the data is not at the location the IRI points to. For example, you have the IRI https://example.org/resource/Amsterdam but the information about the object is located in the dataset https://api.triplydb.com/MyAccount/myCities. This is a problem: the IRI points to a location that does not contain the data, and the data is at a location that cannot be found without the correct IRI. This is where you can use redirects to send the user from the IRI to the location where the data is found. How to set up redirects for dereferencing \u00b6 Redirects enable easy dereferencing of resources.
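To make the goal concrete before the setup steps: from a client's point of view, dereferencing simply means requesting the IRI with an RDF media type and following the redirect chain. The following is a minimal sketch, assuming the redirect described in the steps below is already configured; the Accept header is standard HTTP content negotiation and nothing TriplyDB-specific.

```python
# Minimal sketch: dereferencing a resource IRI from the client side.
# Assumes the redirect chain described in the steps below is already in place.
import requests

iri = "https://example.org/resource/Amsterdam"

# Ask for a machine-readable RDF serialization rather than an HTML page.
response = requests.get(iri, headers={"Accept": "text/turtle"}, allow_redirects=True)

print(response.status_code)   # 200 once the redirect chain resolves
print(response.url)           # the final location the client ended up at
print(response.text[:300])    # the first few hundred characters of the returned RDF
```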
For example, you can dereference a resource https://example.org/resource/Amsterdam into dataset https://api.triplydb.com/MyAccount/myCities by following these steps: First, update the web server that the IRI originally points to, so that it redirects to the TriplyDB redirect API. In this example, all subpaths of /resource are to be redirected from https://example.org to https://api.triplydb.com/redirect/$requestUri . This means that when a request for https://example.org/resource/Amsterdam arrives at the web server of https://example.org , it is redirected to https://api.triplydb.com/redirect/https://example.org/resource/Amsterdam . Now that the external web server is set up to redirect to TriplyDB, TriplyDB needs to be configured to accept the request and redirect it to the correct dataset. This is done by adding a rule on the administrator redirects page. To add a rule, press the ADD RULE button to begin creating a new rule. For this example we want to add a prefix rule with the pattern https://example.org/resource/City/ . The prefix rule needs a dataset to redirect to; this will be the dataset https://api.triplydb.com/myAccount/myCities . Press CREATE RULE to create the rule. Each rule is evaluated when a request comes in at https://api.triplydb.com/redirect/$requestUri , and mapping rules are evaluated from top (highest priority) to bottom (lowest priority). When a match is found, the requestUri is redirected to that location. TriplyDB supports two types of mapping rules: Prefix Prefix rules trigger when the start of a resource matches the specified string. Regex Regular Expression rules trigger when a resource matches a Regular Expression.","title":"Admin Settings Pages"},{"location":"triply-db-getting-started/admin-settings-pages/#admin-settings-pages","text":"You can use the console to perform administrator tasks. These tasks are performed within the admin settings pages, which are accessible by clicking on the user menu in the top-right corner and selecting the \u201cAdmin settings\u201d menu item. You must have administrator privileges to access these pages and perform administrator tasks.","title":"Admin settings Pages"},{"location":"triply-db-getting-started/admin-settings-pages/#overview-page","text":"The first page that comes into view when opening the admin settings pages is the overview page. This page contains an overview of all the important statistics of the instance. The page also shows how close the instance is to hitting one or more limits. If no limit is set, a statistic is shown as a counter. If a limit is set, a gauge is shown with a green, orange or red bar. The colors denote how close that statistic of the instance is to the limit: green means not close to the limit, orange means close to the limit, and red means over the limit.","title":"Overview page"},{"location":"triply-db-getting-started/admin-settings-pages/#general-overview","text":"The general overview gives an insight into the software version of the instance. Each instance consists of a console and an API. The console is the web interface of the instance; it has a build date corresponding to the build date of the docker image of the console, and a version number corresponding to the version of that docker image. The API is the layer between the console and the data. The API is separate from the console and is a different docker image. The API also has a version and build date of the docker image.
Also contains a starting time, and an updated time, the moments when the docker image is started for this instance or when the docker image is updated for the instance.","title":"General overview"},{"location":"triply-db-getting-started/admin-settings-pages/#accounts-overview","text":"The accounts overview shows how many organizations and users are in this instance. The organizations and users are shown in a counter if no limit is set. If a limit is set on the number of organizations and/or users of the instance a gauge is shown.","title":"Accounts overview"},{"location":"triply-db-getting-started/admin-settings-pages/#data-overview","text":"The data overview shows multiple statistics about datasets. The first counter shows the amount of datasets on the instance. The second and third counters show the amount of graphs and statements in all graphs. The fourth and fifth counters show the amount of unique graphs and statements. When a graph is copied from one dataset to another, the data in that graph does not change. The amount of unique data does not change either. The amount of unique data is a more representative way of calculating the amount of data in the instance. All statistics are shown in a counter, if no limit is set. If a limit is set on one of the statistics of the instance a gauge is shown.","title":"Data overview"},{"location":"triply-db-getting-started/admin-settings-pages/#services-overview","text":"The data overview shows how multiple statistics about services. The first counter shows the total amount of services on the instance, The second counter shows the total amount of statements in all the services. Then for each of our service types a specific counter is created. Each containing the amount of services and the amount of statements in that service. All statistics are shown in a counter if no limit is set. If a limit is set on one of the statistics of the instance a gauge is shown.","title":"Services overview"},{"location":"triply-db-getting-started/admin-settings-pages/#settings-page","text":"The settings page is the main page for administrators to institute instance wide changes. An administrator can change the site logo's here, change the contact email or update site wide prefixes.","title":"Settings page"},{"location":"triply-db-getting-started/admin-settings-pages/#set-logos-and-banner","text":"For changing the logos and the banner follow the next steps: Click on the \u201cAdmin settings\u201d link in the user menu (top-right corner) and click the \"Settings\" tab. This brings up an overview of all the settings an admin can set for the TriplyDB instance. Under \"Site logos\" and \"Site banner\" you can upload a site logo (square and landscape) or a banner. The logo (preferably squared) is required to be SVG. In the UI the 30x30px image is displayed. The banner can be of any format, however, the WEBP is preferred. The image's resolution should be between 1920x500 and up to 4000x500. The banner is displayed at a height 500px. If the image is smaller than the browser screen size, the image will be stretched. Make sure you use files with a maximum size of 5 MB.","title":"Set logos and banner"},{"location":"triply-db-getting-started/admin-settings-pages/#setting-metadata","text":"For changing the metadata follow the next steps: 1. Click on the \u201cAdmin settings\u201d link in the user menu (top-right corner) and click the \"Settings\" tab. This brings up an overview of all the settings an admin can set for the TriplyDB instance. 
On that page navigate to \"Site metadata\", it looks as follows: Here you can set the name, tag line, description and welcome text. The name of your website appears in your browser tab. The welcome text appears on the homepage of your TriplyDB instance. The tagline and description are for metadata purposes (e.g. findability and website previews).","title":"Setting metadata"},{"location":"triply-db-getting-started/admin-settings-pages/#setting-contact-email","text":"For changing the contact email follow the next steps: 1. Click on the \u201cAdmin settings\u201d link in the user menu (top-right corner) and click the \"Settings\" tab. This brings up an overview of all the settings an admin can set for the TriplyDB instance. On that page navigate to \"Contact Email\". Here, you can change the contact email to a new contact email for the instance.","title":"Setting contact email"},{"location":"triply-db-getting-started/admin-settings-pages/#setting-example-datasets","text":"Example datasets are introduction datasets on the frontpage of your instance. The Example datasets are datasets that are interesting for people that visit your page to see and interact with. Most often you'll use open datasets to show them off on the frontpage. You can also use internal or private datasets, but these will only be visible if the person seeing them has the right access rights. For editing the example datasets follow the next steps: 1. Click on the \u201cAdmin settings\u201d link in the user menu (top-right corner) and click the \"Settings\" tab. This brings up an overview of all the settings an admin can set for the TriplyDB instance. On that page, navigate to \"Example datasets\". Here, you can execute the following changes: You can move datasets up and down in the order by clicking and holding your left mouse button over the three horizontal lines in front of the dataset name. You can then drag the selected dataset to their new spot. In the search field below the already added datasets you can add a new example dataset by typing in the search field and selecting the correct dataset. You can remove datasets by pressing the x on the right side of the dataset name to remove it from the example dataset list.","title":"Setting example datasets"},{"location":"triply-db-getting-started/admin-settings-pages/#setting-starter-dataset","text":"The starter dataset is a beginner-friendly linked dataset that can be an introduction into linked data when a user creates an account for the first time. The starter dataset is visible for a user when the user has not yet created a dataset on its own account. For editing the example datasets follow the next steps: 1. Click on the \u201cAdmin settings\u201d link in the user menu (top-right corner) and click the \"Settings\" tab. This brings up an overview of all the settings an admin can set for the TriplyDB instance. On that page navigate to \"Starter dataset\". Here you can change the starter dataset to a new starter dataset for the instance by typing in the search bar a name of an existing dataset to replace the started dataset. This dataset then will be presented to users on their account page, with an option to import(copy) them immediately. This needs to be a public dataset! If it's not public, new users will have to create a dataset. 
The starter dataset is only shown if the user currently has no datasets.","title":"Setting Starter dataset"},{"location":"triply-db-getting-started/admin-settings-pages/#setting-authentication","text":"One of the roles of an administrator is to make sure only the right people will sign up for the TriplyDB instance. To do this, an administrator can set up authentication protocols. The authentication protocols can block people from signing up to instances where they are not allowed to sign up to. For changing the authentication protocols follow the next steps: 1. Click on the \u201cAdmin settings\u201d link in the user menu (top-right corner) and click the \"Settings\" tab. This brings up an overview of all the settings an admin can set for the TriplyDB instance. On that page navigate to \"Authentication\". Now you can change the password sign up. Allowing people to only register with a password or they are only allowed to register with a google or Github account. When password signup is enabled, the administrator can also set the permitted signup domains. Only users with e-mail addresses that match these domains are allowed to sign-up. Wildcards are allowed and domains are comma separated, for example: mydomain.com,*.mydomain.com.","title":"Setting Authentication"},{"location":"triply-db-getting-started/admin-settings-pages/#setting-site-wide-prefixes","text":"One of the advantages of using TriplyDB is that you can set site-wide prefixes once and use them everywhere on the instance. Site-wide prefixes are prefixes defined in the admin settings and can be used for all datasets that contain the IRIs matching the prefixes. For editing the side-wide prefixes follow the next steps: 1. Click on the \u201cAdmin settings\u201d link in the user menu (top-right corner) and click the \"Settings\" tab. This brings up an overview of all the settings an admin can set for the TriplyDB instance. On that page navigate to \"Site-wide prefixes\". Here, you can execute the following changes: Each field of the already added site-wide prefixes you can edit. You can edit the prefix label by typing in the first field. You can edit the prefix IRI and in the second field. Pressing UPDATE PREFIXES updates the list. In the last field below the already added site-wide prefixes you can add a new site-wide prefix by typing in the first field the prefix label, and in the second field the prefix IRI. Pressing UPDATE PREFIXES updates the list. You can remove prefixes by pressing the x on the right side of the prefixes name to remove it from the site-wide prefixes list.","title":"Setting Site-wide prefixes"},{"location":"triply-db-getting-started/admin-settings-pages/#account-overview-page","text":"The account page governs all the accounts of an instance. The paginated table shows all the accounts of the instance. The table is equipped with filters and sorting mechanisms to quickly search for and locate specific accounts according to search and filter criteria. The table also has a search field at the right side to quickly do wildcard searching. The table sorts all accounts automatically on the created at date with the latest created at date accounts first. The filters on top of the table can be used to filter the following columns: Name The name of the account, you can order the table based on the alphanumeric ordering, and filter based on the sequence of characters appearing in the filter. The name is also a URL that brings you to the location of the account. 
Type Type of the account, this can either be 'Organization' or 'User'. In the filter you can select a specific account type or 'All' account types. Display name The display name of the account, often an account has both a name and a display name. The display name is not limited to a specific set of characters, as it is not used as an URL. You can order the table based on the alphanumeric ordering, and filter based on the sequence of characters appearing in the filter. Email The email address of the account. You can order the table based on the alphanumeric ordering, and filter based on the sequence of characters appearing in the filter. Created at How long ago an account was created. When you hover over the text you can see the precise moment an account is created. You can order accounts based on the moment of creation. Updated at How long ago an account has been updated with new metadata such as display name or password. When you hover over the text you can see the precise moment an account is last updated. You can order accounts based on the moment of updated at time. Last activity How long ago the account has been last active. When you hover over the text you can see the precise moment an account was last active. You can order the accounts based on the moment of last time the account was active. Role Role of the account, this can either be 'light', 'regular' or 'administrator'. In the filter you can select a specific role or 'All' roles. Verified An account can be verified or not, to verify an account, the user needs to click on the verify button in the email. Or an administrator has verified the account in the account settings of that account. Only 'users' need to be verified. Disabled An account can be disabled or not, to disabled an account, the user needs to click on the disabled button in their user settings. Or an administrator has disabled the account in the account settings of that account. legal consent An account can have accepted the legal consent or not, to accept legal consent, the user needs to click on the accept legal consent either when creating an account or by checking it in the user settings. Only 'users' need to have accepted legal consent. For each account you can execute the following actions: Open account settings For each account, there is a button such that the administrator can directly go to the account settings of the user or organization. The account settings are behind the `cogwheel` button.","title":"Account overview page"},{"location":"triply-db-getting-started/admin-settings-pages/#add-new-users","text":"Go to the \u201cAccounts tab\u201d to receive an overview of all accounts on the TriplyDB instance. The type of account can be observed based on the following icons: Icon Account type organization user","title":"Add new user(s)"},{"location":"triply-db-getting-started/admin-settings-pages/#create-a-new-user","text":"New users can only be created by administrators by performing the following steps: Click on the \u201cAdmin settings\u201d link in the user menu (top-right corner) and click the \u201cAccounts\u201d tab. This brings up an overview of all users and organizations on the TriplyDB instance. Click the \u201cAdd user\u201d button. Fill in the user name and email address of the prospective user. The user name must consist of alphanumeric characters ( A-Za-z ) and hyphens ( - ). Click the \u201cAdd user\u201d button. This sends an account creation email to the prospective user, containing a link that allows them to log in. 
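As a small aside, the user-name constraint from the steps above (alphanumeric characters and hyphens) can be expressed as a simple pattern. The sketch below is only an illustration and assumes that digits count as alphanumeric; it is not the exact validation rule that TriplyDB enforces.

```python
# Illustration of the documented user-name constraint: alphanumeric characters
# and hyphens. Assumes digits are allowed; not the exact TriplyDB validation.
import re

USER_NAME_PATTERN = re.compile(r"^[A-Za-z0-9-]+$")

for name in ["jane-doe", "JaneDoe42", "jane_doe", "jane doe"]:
    print(f"{name!r}: {bool(USER_NAME_PATTERN.match(name))}")
# 'jane-doe': True, 'JaneDoe42': True, 'jane_doe': False, 'jane doe': False
```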
In addition to the above default procedure, the following two options are provided for user account creation: Temporary account : By default, user accounts do not expire. Sometimes it is useful to create a temporary account by specifying a concrete date in the \u201cAccount expiration date\u201d widget. Preset password : By default, a user can set her password after logging in for the first time by clicking on the link in the account creation email. When a password is entered in the \u201cPassword\u201d field, the user must enter this password in order to log in for the first time.","title":"Create a new user"},{"location":"triply-db-getting-started/admin-settings-pages/#datasets-page","text":"The account page governs all the datasets of an instance. The paginated table shows all the datasets of the instance. The table is equipped with filters and sorting mechanisms to quickly search for and locate specific datasets according to search and filter criteria. The table also has a search field at the right side to quickly do wildcard searching. The table sorts all datasets automatically on the created at date with the latest created at date datasets first. The filters on top of the table can be used to filter the following columns: Name The name of the dataset, you can order the table based on the alphanumeric ordering, and filter based on the sequence of characters appearing in the filter. The name is also a URL that brings you to the location of the dataset. Access level Access level of the dataset, this can either be 'Public', 'Internal' or 'Private'. In the filter you can select a specific access level or 'All' access levels. Display name The display name of the dataset, often a dataset has both a name and a display name. The display name is not limited to a specific set of characters, as it is not used as an URL. You can order the table based on the alphanumeric ordering, and filter based on the sequence of characters appearing in the filter. Owner The owner of the dataset. The owner is a URL and brings you to the overview page of the owner. The owners can be filtered based on the sequence of characters appearing in the filter. Graph count The amount of graphs in a dataset. These are all the total amount of graphs in a dataset, and can be filtered with the slider. Statement count The amount of statements in a dataset. These are all the statements of all the graphs, and can be filtered with the slider. Service count The amount of services in a dataset. These can be filtered with the slider. Asset count The amount of assets in a dataset. These can be filtered with the slider. Created at How long ago a dataset has been created. When you hover over the text you can see the precise moment a dataset is created. You can order datasets based on the moment of creation. Updated at How long ago a dataset has been updated with new metadata such as display name or new data. When you hover over the text you can see the precise moment an account is last updated. You can order dataset based on the moment of updated at time. Last graph edit How long ago the last graph has been edited, either new data is uploaded or removed, or the graph names changed. When you hover over the text you can see the precise moment a dataset was edited. You can order the accounts based on the moment of last time the dataset was last edited. 
For each dataset you can execute the following actions: Open dataset settings For each dataset there is button such that the administrator can directly go to the dataset settings of the dataset. The dataset settings are behind the `cogwheel` button.","title":"Datasets page"},{"location":"triply-db-getting-started/admin-settings-pages/#services-page","text":"The services page governs all the services of an instance. The paginated table shows all the services of the instance. The table is equipped with filters and sorting mechanisms to quickly search for and locate specific services according to search and filter criteria. The table also has a search field at the right side to quickly do wildcard searching. The table sorts all services automatically if a service is in an error state or not. All services that are in error state will be shown at the top of the table. This way immediate action can be taken to check the service. The filters on top of the table can be used to filter the following columns: Name The name of the SPARQL service, you can order the table based on the alphanumeric ordering, and filter based on the sequence of characters appearing in the filter. The name is also a URL that brings you to the location of the service. Type Type of the service, this can either be 'Virtuoso', 'Jena', 'Blazegraph', 'Prolog' or 'Elasticsearch'. In the filter you can select a specific service type or 'All' service types. Status The status of the service, can be 'Starting', 'Running', 'Stopped', 'Updating' or 'Error'. In the filter you can select a specific service status or 'All' services statuses Statements The amount of statements in a service. These are all the loaded statements in the service, and can be filtered with the slider. Loaded graphs Amount of graphs loaded in the service. All the statements of all the graphs together will count up to the total amount of statements. Dataset The dataset the service belongs to. The dataset is clickable and brings you to the dataset page. The datasets can be filtered based on the sequence of characters appearing in the filter. Owner The owner of the dataset is also the owner of the service. The owner is a URL and brings you to the overview page of the owner. The owners can be filtered based on the sequence of characters appearing in the filter. Created How long ago a service has been created. When you hover over the text you can see the precise moment a service is created. You can order the services based on the moment of creation. Last queried How long ago the service has been last queried. When you hover over the text you can see the precise moment a service is last queried. You can order the services based on the moment of last time the service has been queried. Auto stops Some services are equipped with an auto stop feature. This feature reduces the memory resources when a service is not queried in a while. The column `Auto stops` shows how long it will take before a service is auto-stopped. You can order the services on when the auto-stop feature kicks in. Each time a service is used the timer is reset. Version A service always has a particular version. A service is not automatically updated as it could be that the service has possible down time. The owner of the service can update a service when they deem it necessary to update to the latest version. For each service you can execute the following actions: Update the service When a service can be updated an orange arrow will appear just below the service. 
When you press the update service button the service is automatically updated to the latest service version. Open additional information For each service there is additional information available. The additional information is behind the `i` button. The additional information contains information about the graphs in the dataset and a raw information view of the service metadata. Inspect the logs For each service there is a log available. The logs are behind the `text` button. The logs contain information Synchronize the service The service can be outdated. This happens when the data in the dataset does not corresponds with the data in the service. When this happens the service can be synchronized from here to make it up to date with the latest version of the data. Remove the service When a service is no longer necessary or there needs to be made some space on the instance a service can be removed from here. Some of these actions can be cumbersome when you need to do them one at a time. To help with this, on the left side of the table you can click on the tickbox. This will select all the services that match search criteria if there search criteria and all tables when there are no search criteria. When pressed you can now remove all selected services or update all selected services to a new software version.","title":"Services page"},{"location":"triply-db-getting-started/admin-settings-pages/#redirects-page","text":"The great thing about linked data is that IRIs are used to define objects in linked data. Then when you visit the IRIs you find useful information about the object. But sometimes the data is not on the location where the IRI is pointing towards. You have the IRI: https://example.org/resource/Amsterdam but the information about the object is located in the dataset https://api.triplydb.com/MyAccount/myCities. This is a problem as the IRI is pointing to a location that does not contain the data, and the data is at a location that is not found without the correct IRI. This is where you can use redirects to redirect the user from the IRI to the location where the data is found.","title":"Redirects page"},{"location":"triply-db-getting-started/admin-settings-pages/#how-to-setup-a-redirects-for-dereferencing","text":"Redirects enable easy dereferencing of resources. For example, you can dereference a resource https://example.org/resource/Amsterdam into dataset https://api.triplydb.com/MyAccount/myCities by following these steps: First update the web server of where the IRI is originally pointing towards the redirect API. In this example all subpaths of /resource are to be redirected from https://example.org to https://api.triplydb.com/redirect/$requestUri . this means that when a request for https://example.org/resource/Amsterdam comes to the web server of https://example.org it will be redirected to https://api.triplydb.com/redirect/https://example.org/resource/Amsterdam . Now that the external web server is set up to redirect to TriplyDB, TriplyDB needs to be configured to accept the request and redirect it to the correct dataset. This is done by adding a rule on the administrator redirects page. To add a rule, press the ADD RULE button to begin with the creation of a new rule. For this example we want to add a prefix rule with the pattern to match https://example.org/resource/City/ . The prefix rule needs a dataset to redirect to. This will be the dataset https://api.triplydb.com/myAccount/myCities . Press CREATE RULE to create the rule. 
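The next paragraph describes how these rules are evaluated. As a companion, here is a minimal sketch of that evaluation order in Python: the prefix rule and dataset URL come from the example above, while the second (regex) rule and its dataset are hypothetical additions, included only to show both rule types.

```python
# Minimal sketch of redirect rule evaluation: rules are checked from top
# (highest priority) to bottom (lowest priority); the first match wins.
# Illustration only; this is not TriplyDB's internal code.
import re

rules = [
    {"type": "prefix", "pattern": "https://example.org/resource/City/",
     "dataset": "https://api.triplydb.com/myAccount/myCities"},
    # Hypothetical second rule, added only to illustrate the regex rule type.
    {"type": "regex", "pattern": r"^https://example\.org/resource/.+$",
     "dataset": "https://api.triplydb.com/myAccount/myOtherDataset"},
]

def resolve(request_uri):
    """Return the dataset of the first matching rule, or None if none match."""
    for rule in rules:
        if rule["type"] == "prefix" and request_uri.startswith(rule["pattern"]):
            return rule["dataset"]
        if rule["type"] == "regex" and re.match(rule["pattern"], request_uri):
            return rule["dataset"]
    return None

print(resolve("https://example.org/resource/City/Amsterdam"))
# -> https://api.triplydb.com/myAccount/myCities (the prefix rule matches first)
```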
Each rule is evaluated when a request comes in https://api.triplydb.com/redirect/$requestUri and mapping rules are evaluated from top (highest priority) to bottom (lowest priority). When a match is found the requestUri is then redirected to that location. TriplyDB supports two types of mapping rules: Prefix Prefix rules trigger when the start of a resource matches the specified string. Regex Regular Expression rules trigger when a resource matches a Regular Expression.","title":"How to setup a redirects for dereferencing"},{"location":"triply-db-getting-started/data-stories/","text":"On this page: Data stories Creating a data story Editing a data story Adding elements Existing query Paragraph Sharing and embedding Data stories \u00b6 A TriplyDB data story is a way of communicating information about your linked data along with explanatory text while also being able to integrate query results. Creating a data story \u00b6 You can create your own data story via the stories tab on TriplyDB. If this is your first time creating a data story, your view will look something like the image below. If you already are a proud owner of a data story, you will find it here. To create a new one, you can click the orange \"Create story\" button and you\u2019ll see the same form. In this form, you can fill in the title and set the access level for a data story. When everything is set, press the \"Create story\" button.This will take you to a new page where you can customize the data story. Editing a data story \u00b6 As shown in the image below, in the top right corner of the page, there is a menu button. Here you will find the following: Story settings : Here you can change the title and the access level of your story. Change banner : Here you can change the banner, just choose an image that you want as your banner (wide images work best). Copy : To copy the story to a different user or organization. Transfer : To transfer the story to a different user or organization. Embed : HTML to embed the story in a web page using an iFrame. Print : Dialog and print options to print the story. Delete : To delete the story. In the right lower corner you see a button with a notepad. With this button, you can toggle between the edit view, which allows you to edit the story, and the reader view, which is how readers of your story will perceive this page. Adding elements \u00b6 To create your first element press \"+ Add new element\". This will open a new form as shown in the images below. Here you can select what kind of element you want to add to your data story; you\u2019ll have the option to write text, to select an already existing SPARQL query, or even to create a new SPARQL query. Existing query \u00b6 Let\u2019s start by selecting a query for our data story. Maybe you have already created one, but if you haven\u2019t, you can select one of the queries available to you. You can search in the Query search bar and select the one you want, for example \"our-first-select-query\". Optionally you can select the version of the query and set the caption. When everything is set, press \"Create story element\". And look, we have added our first element to our story! Paragraph \u00b6 Data sometimes needs accompanying text to be completely understandable. TriplyDB not only supports writing plain text, but TriplyDB paragraphs are also markdown compliant. The markdown that you\u2019ll add in the paragraph text box will be rendered as HTML and can be previewed. 
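To illustrate what "rendered as HTML" means in practice, here is a minimal sketch using the third-party Python markdown package; this shows the general markdown-to-HTML step only, and is not the renderer that TriplyDB itself uses.

```python
# Minimal sketch: turning markdown (as written in a story paragraph) into HTML.
# Uses the third-party "markdown" package purely as an illustration.
import markdown  # pip install markdown

text = "## Population growth\n\nAmsterdam grew **fast** between 1850 and 1900."
print(markdown.markdown(text))
# <h2>Population growth</h2>
# <p>Amsterdam grew <strong>fast</strong> between 1850 and 1900.</p>
```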
TriplyDB also supports images, and even code blocks with highlighting for the most common linked data and programming languages. Sharing and embedding \u00b6 Before you know it, you will have created your first data story. Congratulations! Now it is time to share it with the world, but don\u2019t forget to set the access level to \u201cpublic\u201d. Then you have two options: 1. You can simply share the URL in TriplyDB. 2. You can embed the Data Story on your own webpage. Scroll all the way to the end of your Data Story and click the \u201c Embed\u201d button. This brings up a code snippet that you can copy/paste into your own HTML web page.","title":"Data Stories"},{"location":"triply-db-getting-started/data-stories/#data-stories","text":"A TriplyDB data story is a way of communicating information about your linked data along with explanatory text while also being able to integrate query results.","title":"Data stories"},{"location":"triply-db-getting-started/data-stories/#creating-a-data-story","text":"You can create your own data story via the stories tab on TriplyDB. If this is your first time creating a data story, your view will look something like the image below. If you already are a proud owner of a data story, you will find it here. To create a new one, you can click the orange \"Create story\" button and you\u2019ll see the same form. In this form, you can fill in the title and set the access level for a data story. When everything is set, press the \"Create story\" button.This will take you to a new page where you can customize the data story.","title":"Creating a data story"},{"location":"triply-db-getting-started/data-stories/#editing-a-data-story","text":"As shown in the image below, in the top right corner of the page, there is a menu button. Here you will find the following: Story settings : Here you can change the title and the access level of your story. Change banner : Here you can change the banner, just choose an image that you want as your banner (wide images work best). Copy : To copy the story to a different user or organization. Transfer : To transfer the story to a different user or organization. Embed : HTML to embed the story in a web page using an iFrame. Print : Dialog and print options to print the story. Delete : To delete the story. In the right lower corner you see a button with a notepad. With this button, you can toggle between the edit view, which allows you to edit the story, and the reader view, which is how readers of your story will perceive this page.","title":"Editing a data story"},{"location":"triply-db-getting-started/data-stories/#adding-elements","text":"To create your first element press \"+ Add new element\". This will open a new form as shown in the images below. Here you can select what kind of element you want to add to your data story; you\u2019ll have the option to write text, to select an already existing SPARQL query, or even to create a new SPARQL query.","title":"Adding elements"},{"location":"triply-db-getting-started/data-stories/#existing-query","text":"Let\u2019s start by selecting a query for our data story. Maybe you have already created one, but if you haven\u2019t, you can select one of the queries available to you. You can search in the Query search bar and select the one you want, for example \"our-first-select-query\". Optionally you can select the version of the query and set the caption. When everything is set, press \"Create story element\". 
And look, we have added our first element to our story!","title":"Existing query"},{"location":"triply-db-getting-started/data-stories/#paragraph","text":"Data sometimes needs accompanying text to be completely understandable. TriplyDB not only supports writing plain text, but TriplyDB paragraphs are also markdown compliant. The markdown that you\u2019ll add in the paragraph text box will be rendered as HTML and can be previewed. TriplyDB also supports images, and even code blocks with highlighting for the most common linked data and programming languages.","title":"Paragraph"},{"location":"triply-db-getting-started/data-stories/#sharing-and-embedding","text":"Before you know it, you will have created your first data story. Congratulations! Now it is time to share it with the world, but don\u2019t forget to set the access level to \u201cpublic\u201d. Then you have two options: 1. You can simply share the URL in TriplyDB. 2. You can embed the Data Story on your own webpage. Scroll all the way to the end of your Data Story and click the \u201c Embed\u201d button. This brings up a code snippet that you can copy/paste into your own HTML web page.","title":"Sharing and embedding"},{"location":"triply-db-getting-started/editing-data/","text":"On this page: Editing Data Choosing a Class IRI Naming Input Fields Groups and Order Workflow Editing (Draft) Submit for Review (Staged) Accept (Accepted) Revoke (Deprecated) SKOS Support SKOS Hierarchy SKOS Concept Schemas Editing Data \u00b6 In specific instances of TriplyDB, a SKOS editor is available that enables the editing of SKOS instance data based on a specified Data Model. The Editing Data section provides basic instructions for using the SKOS Editor to create and adapt new data instances. Although the Data Editor is currently limited to SKOS, future editions will enable more types of editing. In order to open the Editor, select the Dataset that you want to work with. If your instance provides the feature, you will see Editor on the left-hand side. Choosing a Class \u00b6 When searching for a class, you can type any unique character combination contained in the name into the search box. You don't need to press return in order to see all the class names that contain the pattern anywhere in the class name. IRI Naming \u00b6 When you create a class, you can define the IRI (Internationalized Resource Identifier) naming conventions to uniquely identify the class. Input Fields \u00b6 Fields can be added by clicking on the plus symbol on the right side of the list of fields. Required fields are marked by an asterisk. In the image, you can see a drop-down choice menu for the required value of the heeft contentstatus field. When you click on the downward pointing triangle on the right side of the search field, it points upwards and a list unfolds to reveal a set of choices for possible values. If you want free-form entry, you can click on the x that is revealed when the mouse is over the arrow. You can remove a field by clicking on the garbage can symbol on the right. It is also possible to search such menus with pattern search. Groups and Order \u00b6 Once you've selected a Dataset, you can go to the Data Model editor on the left side of the screen. When adding a new property, you are able to specify groups of properties and set the order in which properties should appear. The group choice menu can be used or the + can be used to add a new property group. 
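The Order field described next holds an integer that determines where a property appears relative to the other properties. A minimal sketch of that sorting, using the two example properties mentioned below (illustration only):

```python
# Minimal sketch: properties are shown in ascending order of their Order value.
properties = [
    {"property": "heeft creatiedatum", "order": 2},
    {"property": "heeft voorkeurslabel", "order": 1},
]
for prop in sorted(properties, key=lambda p: p["order"]):
    print(prop["order"], prop["property"])
# 1 heeft voorkeurslabel
# 2 heeft creatiedatum
```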
An integer assigned to the property in the Order field will be used to order it against the value of order in other properties in the SKOS Editor. In the image, you can see the heeft voorkeurslabel has Order of 1 and heeft creatiedatum has Order of 2, so it will come after the first one in the Property sheets of the editor. Workflow \u00b6 The workflow provides a structured process for data entry and review, enabling seamless transitions between stages. The workflow consists of the following stages: flowchart LR Start(( )) -- Create --> Draft Draft -- Stage --> Staged Draft -- Delete --> Start Draft -- Edit --> Draft Staged -- Accept --> Accepted Staged -- Reject --> Draft Accepted -- Revoke --> Deprecated Accepted -- Edit --> Draft Deprecated -- Edit --> Draft Editing (Draft) \u00b6 The initial phase where data can be created or modified in draft form. Submit for Review (Staged) \u00b6 After drafting, the data can be submitted for review to ensure accuracy and completeness. Accept (Accepted) \u00b6 Once reviewed and approved, the data entry moves to the accepted stage, making it an official part of the dataset. Content status is set to active . Note: the reviewer must be a different person than the one who made the last edit. Revoke (Deprecated) \u00b6 If data becomes outdated or invalid, it can be moved to the deprecated status, effectively removing it from active use. Content status is set to inactive . SKOS Support \u00b6 Support for SKOS (Simple Knowledge Organization System) allows for hierarchical and conceptual data organization. SKOS Hierarchy \u00b6 Define and manage hierarchical relationships between data entries within SKOS. SKOS Concept Schemas \u00b6 This section allows for the creation and use of SKOS concept schemas, aiding in the classification and linking of related data concepts.","title":"Editing (SKOS) Data"},{"location":"triply-db-getting-started/editing-data/#editing-data","text":"In specific instances of TriplyDB, a SKOS editor is available that enables the editing of SKOS instance data based on a specified Data Model. The Editing Data section provides basic instructions for using the SKOS Editor to create and adapt new data instances. Although the Data Editor is currently limited to SKOS, future editions will enable more types of editing. In order to open the Editor, select the Dataset that you want to work with. If your instance provides the feature, you will see Editor on the left-hand side.","title":"Editing Data"},{"location":"triply-db-getting-started/editing-data/#choosing-a-class","text":"When searching for a class, you can type any unique character combination contained in the name into the search box. You don't need to press return in order to see all the class names that contain the pattern anywhere in the class name.","title":"Choosing a Class"},{"location":"triply-db-getting-started/editing-data/#iri-naming","text":"When you create a class, you can define the IRI (Internationalized Resource Identifier) naming conventions to uniquely identify the class.","title":"IRI Naming"},{"location":"triply-db-getting-started/editing-data/#input-fields","text":"Fields can be added by clicking on the plus symbol on the right side of the list of fields. Required fields are marked by an asterisk. In the image, you can see a drop-down choice menu for the required value of the heeft contentstatus field. 
When you click on the downward pointing triangle on the right side of the search field, it points upwards and a list unfolds to reveal a set of choices for possible values. If you want free-form entry, you can click on the x that is revealed when the mouse is over the arrow. You can remove a field by clicking on the garbage can symbol on the right. It is also possible to search such menus with pattern search.","title":"Input Fields"},{"location":"triply-db-getting-started/editing-data/#groups-and-order","text":"Once you've selected a Dataset, you can go to the Data Model editor on the left side of the screen. When adding a new property, you are able to specify groups of properties and set the order in which properties should appear. The group choice menu can be used or the + can be used to add a new property group. An integer assigned to the property in the Order field will be used to order it against the value of order in other properties in the SKOS Editor. In the image, you can see the heeft voorkeurslabel has Order of 1 and heeft creatiedatum has Order of 2, so it will come after the first one in the Property sheets of the editor.","title":"Groups and Order"},{"location":"triply-db-getting-started/editing-data/#workflow","text":"The workflow provides a structured process for data entry and review, enabling seamless transitions between stages. The workflow consists of the following stages: flowchart LR Start(( )) -- Create --> Draft Draft -- Stage --> Staged Draft -- Delete --> Start Draft -- Edit --> Draft Staged -- Accept --> Accepted Staged -- Reject --> Draft Accepted -- Revoke --> Deprecated Accepted -- Edit --> Draft Deprecated -- Edit --> Draft","title":"Workflow"},{"location":"triply-db-getting-started/editing-data/#editing-draft","text":"The initial phase where data can be created or modified in draft form.","title":"Editing (Draft)"},{"location":"triply-db-getting-started/editing-data/#submit-for-review-staged","text":"After drafting, the data can be submitted for review to ensure accuracy and completeness.","title":"Submit for Review (Staged)"},{"location":"triply-db-getting-started/editing-data/#accept-accepted","text":"Once reviewed and approved, the data entry moves to the accepted stage, making it an official part of the dataset. Content status is set to active . Note: the reviewer must be a different person than the one who made the last edit.","title":"Accept (Accepted)"},{"location":"triply-db-getting-started/editing-data/#revoke-deprecated","text":"If data becomes outdated or invalid, it can be moved to the deprecated status, effectively removing it from active use. 
Content status is set to inactive .","title":"Revoke (Deprecated)"},{"location":"triply-db-getting-started/editing-data/#skos-support","text":"Support for SKOS (Simple Knowledge Organization System) allows for hierarchical and conceptual data organization.","title":"SKOS Support"},{"location":"triply-db-getting-started/editing-data/#skos-hierarchy","text":"Define and manage hierarchical relationships between data entries within SKOS.","title":"SKOS Hierarchy"},{"location":"triply-db-getting-started/editing-data/#skos-concept-schemas","text":"This section allows for the creation and use of SKOS concept schemas, aiding in the classification and linking of related data concepts.","title":"SKOS Concept Schemas"},{"location":"triply-db-getting-started/exporting-data/","text":"On this page: Exporting Data Export Datasets Export Graphs Extract Exporting Data \u00b6 This section explains how a user of TriplyDB can export linked data stored in the triple store. Export Datasets \u00b6 The data stored on TriplyDB is stored in two different containers: datasets and graphs. Each triple contained in a dataset is part of exactly one graph. A graph is always a part of a dataset and a dataset can have multiple graphs. The following screenshot shows the dataset \"Pok\u00e9mon\" that contains three graphs: \"data\" and \"vocab\". The graph \"data\" contains 28.588 triples and the graph \"vocab\" 185 triples. By summing up the amount of triples contained in the two graphs the dataset \"Pok\u00e9mon\" contains 28.773 triples in total. To export the dataset users can click on the downwards facing arrow. In our example, the dataset is automatically downloaded as the file \"pokemon.trig\" and compressed with .gz. The name of the file is the name of the dataset. The used serialization format is \".trig\" because that is the standard format to store triples that are appended to graphs. It is also possible to export the whole dataset on the graphs interface. Select \"Graphs\" and \"EXPORT ALL GRAPHS\". Export Graphs \u00b6 To export only one graph select \"Graphs\" and the arrow next to the graph that should be exported. In this case the downloaded file \"https___triplydb.com_academy_pokemon_graphs_data.trig.gz\" is named after the graph and also compressed with \"gz\". Extract \u00b6 The process of extracting the compressed file is the same for exporting graphs and datasets. The downloaded and compressed file is automatically stored in the \"Downloads\" folder. Select the file with the ending \".gz\" and open it with a double click. This opens an application that looks similar to the following screenshot: Select the file that should be extracted. In this case select \"pokemon.trig\" and click on \"Extract\". In the following step choose the location where the extracted file should be stored.","title":"Exporting Data"},{"location":"triply-db-getting-started/exporting-data/#exporting-data","text":"This section explains how a user of TriplyDB can export linked data stored in the triple store.","title":"Exporting Data"},{"location":"triply-db-getting-started/exporting-data/#export-datasets","text":"The data stored on TriplyDB is stored in two different containers: datasets and graphs. Each triple contained in a dataset is part of exactly one graph. A graph is always a part of a dataset and a dataset can have multiple graphs. The following screenshot shows the dataset \"Pok\u00e9mon\" that contains three graphs: \"data\" and \"vocab\". The graph \"data\" contains 28.588 triples and the graph \"vocab\" 185 triples. 
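Note that the dots in these counts are thousands separators. The two graph sizes add up to the dataset total mentioned in the next sentence:

```python
# The per-graph triple counts add up to the dataset total (dots above are
# thousands separators: 28.588 means 28,588 triples).
data_graph = 28_588   # triples in the "data" graph
vocab_graph = 185     # triples in the "vocab" graph
print(data_graph + vocab_graph)  # 28773
```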
By summing up the amount of triples contained in the two graphs the dataset \"Pok\u00e9mon\" contains 28.773 triples in total. To export the dataset users can click on the downwards facing arrow. In our example, the dataset is automatically downloaded as the file \"pokemon.trig\" and compressed with .gz. The name of the file is the name of the dataset. The used serialization format is \".trig\" because that is the standard format to store triples that are appended to graphs. It is also possible to export the whole dataset on the graphs interface. Select \"Graphs\" and \"EXPORT ALL GRAPHS\".","title":"Export Datasets"},{"location":"triply-db-getting-started/exporting-data/#export-graphs","text":"To export only one graph select \"Graphs\" and the arrow next to the graph that should be exported. In this case the downloaded file \"https___triplydb.com_academy_pokemon_graphs_data.trig.gz\" is named after the graph and also compressed with \"gz\".","title":"Export Graphs"},{"location":"triply-db-getting-started/exporting-data/#extract","text":"The process of extracting the compressed file is the same for exporting graphs and datasets. The downloaded and compressed file is automatically stored in the \"Downloads\" folder. Select the file with the ending \".gz\" and open it with a double click. This opens an application that looks similar to the following screenshot: Select the file that should be extracted. In this case select \"pokemon.trig\" and click on \"Extract\". In the following step choose the location where the extracted file should be stored.","title":"Extract"},{"location":"triply-db-getting-started/publishing-data/","text":"On this page: Sharing data Sharing your dataset Dataset settings page Update dataset profile Dataset metadata Starting services Existing services Webhooks Sharing data \u00b6 With TriplyDB you can easily make your data available to the outside world. Sharing your dataset \u00b6 You can share your dataset by setting the visibility to \u201cPublic\u201d in the dataset settings menu. Making a dataset public in TriplyDB has the following consequences: The dataset can be searched for and visited by anybody on the web. The dataset will be indexed by web search engines such as Google Dataset Search. Any services that are started for that dataset will be available to anybody on the web. This includes SPARQL, Text Search, and Linked Data Fragments. Dataset settings page \u00b6 The dataset settings page can be accessed from any dataset page. It appears as the last item in the menu to the left (see Figure 1 ). Figure 1. The homepage of a dataset. The dataset settings page contains the following items: Update dataset profile Prefixes Transfer ownership Delete dataset Webhooks Update dataset profile \u00b6 The \"Update dataset profile\" pane (see Figure 2 ) allows the following things to be configured: Dataset Access Level, see Section Access Levels . Dataset metadata, see Section Dataset metadata . Figure 2. The \"Update dataset profile\" pane. Dataset metadata \u00b6 Adding metadata for your datasets is important. This makes it easier to find your dataset later and also allows search engines and social media applications to understand your dataset. The \"Update dataset profile\" pane allows the following metadata to be configured: Dataset name Dataset slug Dataset description Example resources License Avatar Within the TriplyDB instance your dataset is now more findable for users. 
Whenever a user searches on one of the topics of your dataset, or types in a word that is present in the description of your dataset, the dataset will be shown as a search result. The metadata will allow TriplyDB to give a better impression of your dataset when a user visits. Figure 3 shows the dataset homepage of a dataset for which metadata has been configured. Figure 3. The dataset homepage of a dataset with configured metadata. Search engines and social media applications can recognize the metadata that is entered for datasets in TriplyDB. Figure 4 shows an example of a Social Media widget in the Slack chat application. Such widgets are automatically generated upon entering the link to a public dataset in TriplyDB. The chat application in Figure 4 understands the metadata properties: title, description, and image. Different Social Media applications may make use of different metadata properties. Figure 4. A dataset widget in the Slack chat application. Starting services \u00b6 By default, datasets in TriplyDB can be queried through TriplyDB-js as well as through the Linked Data Fragments API. In order to allow additional query paradigms, specific services can be started from the \u201cCreate service\u201d page. This page is accessed by clicking on the \u201cServices\u201d icon in the left-hand sidebar. TriplyDB instances can be configured with different types of services. The below screenshot shows the \u201cCreate service\u201d page for a TriplyDB instance that allows SPARQL, Jena SPARQL, and Elasticsearch services to be created. Notice that three different types of services can be created. It is possible to create multiple services for one dataset. Existing services \u00b6 Existing services are displayed on service widgets (see screenshot). From these widgets, services can be created or deleted. Datasets can change whenever a graph is added, deleted or renamed. When this happens, the data in a service is out of sync with the data in the dataset and a synchronization button will appear in the service widget. By clicking the button, the service will be synchronized with the current state of the dataset. Webhooks \u00b6 If you want to be notified or trigger an event when anything changes in your dataset, you can set up a webhook. The webhook page can be found under dataset's settings on the right, as shown in the image below. To create a webhook, you will need to provide the following information: Payload target : The URL to which the webhook message should be sent. Payload format : The format of the message. Trigger events : Select for which event you wish to trigger the webhook. The options are: Graph import : Happens when data is imported from a different dataset and where the data is already stored on the instance. Linked data upload : Happens when a person uploads data to the instance. The data did not exist on the instance before. Asset upload : Happens when an asset is uploaded. You can activate or deactivate the webhook with the slider after the Webhook is active message. After filling in everything, you can click on the SUBMIT button and the new webhook will be activated. For example, if you wish to trigger a pipeline on gitlab every time you upload an asset to your dataset, you can use the below snippet as a payload target, as described on the official gitlab documentation and select Asset upload as a trigger event. 
https://gitlab.example.com/api/v4/projects//trigger/pipeline?token=&ref= When your webhook is created and active, you can see every occasion the webhook was called in the webhook trigger history.","title":"Sharing Data"},{"location":"triply-db-getting-started/publishing-data/#sharing-data","text":"With TriplyDB you can easily make your data available to the outside world.","title":"Sharing data"},{"location":"triply-db-getting-started/publishing-data/#sharing-your-dataset","text":"You can share your dataset by setting the visibility to \u201cPublic\u201d in the dataset settings menu. Making a dataset public in TriplyDB has the following consequences: The dataset can be searched for and visited by anybody on the web. The dataset will be indexed by web search engines such as Google Dataset Search. Any services that are started for that dataset will be available to anybody on the web. This includes SPARQL, Text Search, and Linked Data Fragments.","title":"Sharing your dataset"},{"location":"triply-db-getting-started/publishing-data/#dataset-settings-page","text":"The dataset settings page can be accessed from any dataset page. It appears as the last item in the menu to the left (see Figure 1 ). Figure 1. The homepage of a dataset. The dataset settings page contains the following items: Update dataset profile Prefixes Transfer ownership Delete dataset Webhooks","title":"Dataset settings page"},{"location":"triply-db-getting-started/publishing-data/#update-dataset-profile","text":"The \"Update dataset profile\" pane (see Figure 2 ) allows the following things to be configured: Dataset Access Level, see Section Access Levels . Dataset metadata, see Section Dataset metadata . Figure 2. The \"Update dataset profile\" pane.","title":"Update dataset profile"},{"location":"triply-db-getting-started/publishing-data/#dataset-metadata","text":"Adding metadata for your datasets is important. This makes it easier to find your dataset later and also allows search engines and social media applications to understand your dataset. The \"Update dataset profile\" pane allows the following metadata to be configured: Dataset name Dataset slug Dataset description Example resources License Avatar Within the TriplyDB instance your dataset is now more findable for users. Whenever a user searches on one of the topics of your dataset, or types in a word that is present in the description of your dataset, the dataset will be shown as a search result. The metadata will allow TriplyDB to give a better impression of your dataset when a user visits. Figure 3 shows the dataset homepage of a dataset for which metadata has been configured. Figure 3. The dataset homepage of a dataset with configured metadata. Search engines and social media applications can recognize the metadata that is entered for datasets in TriplyDB. Figure 4 shows an example of a Social Media widget in the Slack chat application. Such widgets are automatically generated upon entering the link to a public dataset in TriplyDB. The chat application in Figure 4 understands the metadata properties: title, description, and image. Different Social Media applications may make use of different metadata properties. Figure 4. A dataset widget in the Slack chat application.","title":"Dataset metadata"},{"location":"triply-db-getting-started/publishing-data/#starting-services","text":"By default, datasets in TriplyDB can be queried through TriplyDB-js as well as through the Linked Data Fragments API. 
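Besides pointing the payload target at an existing endpoint such as the GitLab pipeline trigger above, you can point it at a small HTTP service of your own. The following is a minimal sketch of such a receiver, assuming Node.js with Express; the exact fields that TriplyDB includes in the webhook payload are not specified here, so the handler simply logs the body it receives.

```typescript
import express from 'express'

// Minimal sketch of a custom webhook payload target (assumes Node.js + Express).
// The exact payload fields TriplyDB sends are not documented here, so the
// handler only logs whatever body arrives and acknowledges the request.
const app = express()
app.use(express.json())

app.post('/triplydb-webhook', (req, res) => {
  // React to "Graph import", "Linked data upload", or "Asset upload" events here.
  console.log('Webhook received:', JSON.stringify(req.body, null, 2))
  res.status(200).send('ok')
})

app.listen(3000, () => {
  console.log('Webhook target listening on http://localhost:3000/triplydb-webhook')
})
```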
In order to allow additional query paradigms, specific services can be started from the \u201cCreate service\u201d page. This page is accessed by clicking on the \u201cServices\u201d icon in the left-hand sidebar. TriplyDB instances can be configured with different types of services. The below screenshot shows the \u201cCreate service\u201d page for a TriplyDB instance that allows SPARQL, Jena SPARQL, and Elasticsearch services to be created. Notice that three different types of services can be created. It is possible to create multiple services for one dataset.","title":"Starting services"},{"location":"triply-db-getting-started/publishing-data/#existing-services","text":"Existing services are displayed on service widgets (see screenshot). From these widgets, services can be created or deleted. Datasets can change whenever a graph is added, deleted or renamed. When this happens, the data in a service is out of sync with the data in the dataset and a synchronization button will appear in the service widget. By clicking the button, the service will be synchronized with the current state of the dataset.","title":"Existing services"},{"location":"triply-db-getting-started/publishing-data/#webhooks","text":"If you want to be notified or trigger an event when anything changes in your dataset, you can set up a webhook. The webhook page can be found under dataset's settings on the right, as shown in the image below. To create a webhook, you will need to provide the following information: Payload target : The URL to which the webhook message should be sent. Payload format : The format of the message. Trigger events : Select for which event you wish to trigger the webhook. The options are: Graph import : Happens when data is imported from a different dataset and where the data is already stored on the instance. Linked data upload : Happens when a person uploads data to the instance. The data did not exist on the instance before. Asset upload : Happens when an asset is uploaded. You can activate or deactivate the webhook with the slider after the Webhook is active message. After filling in everything, you can click on the SUBMIT button and the new webhook will be activated. For example, if you wish to trigger a pipeline on gitlab every time you upload an asset to your dataset, you can use the below snippet as a payload target, as described on the official gitlab documentation and select Asset upload as a trigger event. https://gitlab.example.com/api/v4/projects//trigger/pipeline?token=&ref= When your webhook is created and active, you can see every occasion the webhook was called in the webhook trigger history.","title":"Webhooks"},{"location":"triply-db-getting-started/reference/","text":"On this page: Reference Access Levels Access level control Access Level meaning Access Level dependencies Access levels and workflows Markdown support Headings Text styling Hyperlinks Code In-line code Multi-line code blocks Code language Reference \u00b6 Access Levels \u00b6 TriplyDB uses Access Levels that determine who can access content. Access Levels can be specified for the following content: Datasets, including everything that exist at the dataset level, such as metadata, settings, graphs, and services. Queries Stories Access level control \u00b6 The Access Level control (see Figure 1 ) is available on the settings page for these content types. The Access Level control also appears on the create dialog for these content types. The standard Access Level is always \"Private\". 
An explicit user action is needed to set the Access Level to \"Internal\" or \"Public\". Figure 1. The Access Level control for content in TriplyDB. Access Level meaning \u00b6 What an Access Level means, depends on whether content belongs to a user or to an organization. The following table contains the meaning of the Access Levels for content that belongs to a user: Icon Access Level Meaning Private Content is only accessible to you. Internal Content is accessible to anyone who is logged into the same TriplyDB environment. Public Content is accessible to anyone on the Internet. The following table contains the meaning of the Access Levels for content that belongs to an organization: Icon Access Level Meaning Private Content is only accessible to organization members. Internal Content is accessible to anyone who is logged into the same TriplyDB environment. Public Content is accessible to anyone on the Internet. Access Levels cannot be specified for the following content. This means that this content is always publicly accessible: Organizations, including their metadata and members. Users, including their metadata. Access Level dependencies \u00b6 The Access Levels for datasets, queries, and stories may affect each other. For example, a public query may use a private dataset. This means that visitors who are not logged in, can see the query, its metadata, and its query string; however, such visitors will never receive query results from the private dataset. This ensures that private content always stays private, as intended. A warning is shown to the user when a dependency is introduced to content with a stricter Access Level (see Figure 2 ). This allows the user to change the Access Levels to a consistent state. Figure 2. A public query over a private dataset. Access levels and workflows \u00b6 These access levels are often used for the following workflow: You create a new dataset/query/story starts with access level \u2018Private\u2019. As the dataset/query/story progresses, give it access level \u2018Internal\u2019 to receive feedback from other users. Once the dataset/query/story is ready, give it access level \u2018Public\u2019 to publish it to the world. Markdown support \u00b6 Triply allows rich text formatting to be used in the following places: Dataset description Account description Saved Query description Data Story elements Site welcome message The following Markdown elements are supported: Headings \u00b6 Headings are used to divide a text into different sections. The hash character ( # ) at the beginning of a line indicates a heading is used. Multiple hash characters indicate nested headings. # Heading 1 ## Heading 2 ### Heading 3 #### Heading 4 ##### Heading 5 ###### Heading 6 Text styling \u00b6 Style Syntax Output Bold **bold** bold Italic _italic_ italic Strikethrough ~~strikethrough~~ ~~strikethrough~~ Hyperlinks \u00b6 Style Syntax Output Raw URL https://triply.cc Labeled URL [label](https://triply.cc) label Notice that URLs can also be relative. This allows you to refer to other datasets, saved queries, etc. by using relative paths. Code \u00b6 There are options for formatting in-line code as well as multi-line code blocks. In-line code \u00b6 Code can also be used in-line with single backticks: Use `code` inside a sentence. Multi-line code blocks \u00b6 Multi-line code blocks start and end with three consecutive backticks. The following Markdown denotes two lines of Turtle: select * { graph ?g { ?s ?p ?o. 
} } The above is rendered as follows: select * { graph ?g { ?s ?p ?o. } } Code language \u00b6 The opening backticks are optionally following by the name of the code language. The following code languages are supported: Language Syntax SPARQL sparql Turtle ttl TypeScript typescript R r Python python The other supported languages are: Bash ( bash ), C ( c ), C++ ( cpp ), C# ( csharp ), Extended Backus-Naur Form ( ebnf ), Go ( go ), Haskell ( haskell ), Java ( java ), JavaScript ( javascript ), LaTeX ( latex ), Makefile ( makefile ), Markdown ( markdown ), Objective C ( objectivec ), Pascal ( pascal ), Perl ( perl ), Powershell ( powershell ), Prolog ( prolog ), Regular Expression ( regex ), Ruby ( ruby ), Scala ( scala ), SQL ( sql ), Yaml ( yaml ).","title":"Reference"},{"location":"triply-db-getting-started/reference/#reference","text":"","title":"Reference"},{"location":"triply-db-getting-started/reference/#access-levels","text":"TriplyDB uses Access Levels that determine who can access content. Access Levels can be specified for the following content: Datasets, including everything that exist at the dataset level, such as metadata, settings, graphs, and services. Queries Stories","title":"Access Levels"},{"location":"triply-db-getting-started/reference/#access-level-control","text":"The Access Level control (see Figure 1 ) is available on the settings page for these content types. The Access Level control also appears on the create dialog for these content types. The standard Access Level is always \"Private\". An explicit user action is needed to set the Access Level to \"Internal\" or \"Public\". Figure 1. The Access Level control for content in TriplyDB.","title":"Access level control"},{"location":"triply-db-getting-started/reference/#access-level-meaning","text":"What an Access Level means, depends on whether content belongs to a user or to an organization. The following table contains the meaning of the Access Levels for content that belongs to a user: Icon Access Level Meaning Private Content is only accessible to you. Internal Content is accessible to anyone who is logged into the same TriplyDB environment. Public Content is accessible to anyone on the Internet. The following table contains the meaning of the Access Levels for content that belongs to an organization: Icon Access Level Meaning Private Content is only accessible to organization members. Internal Content is accessible to anyone who is logged into the same TriplyDB environment. Public Content is accessible to anyone on the Internet. Access Levels cannot be specified for the following content. This means that this content is always publicly accessible: Organizations, including their metadata and members. Users, including their metadata.","title":"Access Level meaning"},{"location":"triply-db-getting-started/reference/#access-level-dependencies","text":"The Access Levels for datasets, queries, and stories may affect each other. For example, a public query may use a private dataset. This means that visitors who are not logged in, can see the query, its metadata, and its query string; however, such visitors will never receive query results from the private dataset. This ensures that private content always stays private, as intended. A warning is shown to the user when a dependency is introduced to content with a stricter Access Level (see Figure 2 ). This allows the user to change the Access Levels to a consistent state. Figure 2. 
A public query over a private dataset.","title":"Access Level dependencies"},{"location":"triply-db-getting-started/reference/#access-levels-and-workflows","text":"These access levels are often used for the following workflow: You create a new dataset/query/story starts with access level \u2018Private\u2019. As the dataset/query/story progresses, give it access level \u2018Internal\u2019 to receive feedback from other users. Once the dataset/query/story is ready, give it access level \u2018Public\u2019 to publish it to the world.","title":"Access levels and workflows"},{"location":"triply-db-getting-started/reference/#markdown-support","text":"Triply allows rich text formatting to be used in the following places: Dataset description Account description Saved Query description Data Story elements Site welcome message The following Markdown elements are supported:","title":"Markdown support"},{"location":"triply-db-getting-started/reference/#headings","text":"Headings are used to divide a text into different sections. The hash character ( # ) at the beginning of a line indicates a heading is used. Multiple hash characters indicate nested headings. # Heading 1 ## Heading 2 ### Heading 3 #### Heading 4 ##### Heading 5 ###### Heading 6","title":"Headings"},{"location":"triply-db-getting-started/reference/#text-styling","text":"Style Syntax Output Bold **bold** bold Italic _italic_ italic Strikethrough ~~strikethrough~~ ~~strikethrough~~","title":"Text styling"},{"location":"triply-db-getting-started/reference/#hyperlinks","text":"Style Syntax Output Raw URL https://triply.cc Labeled URL [label](https://triply.cc) label Notice that URLs can also be relative. This allows you to refer to other datasets, saved queries, etc. by using relative paths.","title":"Hyperlinks"},{"location":"triply-db-getting-started/reference/#code","text":"There are options for formatting in-line code as well as multi-line code blocks.","title":"Code"},{"location":"triply-db-getting-started/reference/#in-line-code","text":"Code can also be used in-line with single backticks: Use `code` inside a sentence.","title":"In-line code"},{"location":"triply-db-getting-started/reference/#multi-line-code-blocks","text":"Multi-line code blocks start and end with three consecutive backticks. The following Markdown denotes two lines of Turtle: select * { graph ?g { ?s ?p ?o. } } The above is rendered as follows: select * { graph ?g { ?s ?p ?o. } }","title":"Multi-line code blocks"},{"location":"triply-db-getting-started/reference/#code-language","text":"The opening backticks are optionally following by the name of the code language. 
The following code languages are supported: Language Syntax SPARQL sparql Turtle ttl TypeScript typescript R r Python python The other supported languages are: Bash ( bash ), C ( c ), C++ ( cpp ), C# ( csharp ), Extended Backus-Naur Form ( ebnf ), Go ( go ), Haskell ( haskell ), Java ( java ), JavaScript ( javascript ), LaTeX ( latex ), Makefile ( makefile ), Markdown ( markdown ), Objective C ( objectivec ), Pascal ( pascal ), Perl ( perl ), Powershell ( powershell ), Prolog ( prolog ), Regular Expression ( regex ), Ruby ( ruby ), Scala ( scala ), SQL ( sql ), Yaml ( yaml ).","title":"Code language"},{"location":"triply-db-getting-started/saved-queries/","text":"On this page: Saved Queries How to save a query Creating a new version Deleting a saved query Using a saved query Sharing a saved query Downloading a query result Download more than 10 000 query results - SPARQL pagination Pagination with the saved query API Pagination with TriplyDB.js Using a saved query as RESTful API Using a saved query in Python or R notebooks Query metadata Saved Queries \u00b6 A Saved Query is a versioned SPARQL query with its own URL. Using this URL, users are able to view any version of the query and its results. It can also be used to run the query and retrieve the results from a browser or a program, removing the hassle of figuring out how to run a SPARQL query. How to save a query \u00b6 There are two ways to create a saved query. You need to be logged in and have authorization rights on the dataset to use this feature When working from the SPARQL IDE Using the Saved Queries tab in a dataset Creating a saved query with the SPARQL IDE is done by writing a query/visualization and hitting the save button Creating a new version \u00b6 Updating the saved query can be done by clicking a query in the Saved Queries tab and editing the query or the visualization. Hit the save button to save it as a new version. Deleting a saved query \u00b6 If you want to delete a saved query, you can do so by clicking the three dots on the top right corner of the query, as shown in the image below, and then clicking Delete . Using a saved query \u00b6 Sharing a saved query \u00b6 To share a saved query, for example in Data Stories , you can copy the link that is used when you open the query in TriplyDB. Let's say you have a query called Timelined-Cars-BETA in the dataset core under the account dbpedia and you want to use version 9. Then the following link would be used: https://triplydb.com/DBpedia-association/-/queries/timeline-cars/9 If you want to always use the latest query, you can simply omit the version number like so: https://triplydb.com/DBpedia-association/-/queries/timeline-cars Downloading a query result \u00b6 The result of a query can be downloaded via the TriplyDB interface. After saving the query, open it in TriplyDB. e.g. https://triplydb.com/DBpedia-association/-/queries/timeline-cars/ . You can download results in different data format, depending on which visualization option you use. For example, if you want to download the results in a .json format, you can choose the option Response and click on the download icon or scroll down and click on Download result . The downloaded file is automatically stored in the Downloads -folder and has the name of the query. In our example, the file is called timeline-cars.json . The downloaded file contains the query result as a json-object. TriplyDB also displays the json-object when selecting the option Response . 
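The same result can also be retrieved programmatically. The sketch below fetches the example query's run URL (the RESTful run URL is explained further down this page) and writes the JSON response to a file; it assumes Node.js 18+ for the built-in fetch and a public query, since no authorization header is sent.

```typescript
import { writeFile } from 'node:fs/promises'

// Sketch: fetch the result of the example saved query and store it locally,
// mirroring the GUI download. Assumes Node.js 18+ (built-in fetch) and a
// public query; see the RESTful API section below for the run URL format.
async function run() {
  const runUrl = 'https://api.triplydb.com/queries/DBpedia-association/timeline-cars/run'
  const response = await fetch(runUrl, { headers: { Accept: 'application/json' } })
  if (!response.ok) throw new Error(`Request failed with status ${response.status}`)
  const result = await response.json()
  await writeFile('timeline-cars.json', JSON.stringify(result, null, 2))
}
run()
```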
Below is a table of all supported visualizations and what format of results they produce. Visualization option Result data format Table .csv Response .json Gallery Download not supported Chart .svg Geo Download not supported Geo-3D Download not supported Geo events Download not supported Markup .svg , .html Network .png Timeline Download not supported As another example, to download the query result in CSV-format, select the option Table and click on the download icon. The downloaded file is named after the query with the suffix .csv . Download more than 10 000 query results - SPARQL pagination \u00b6 This section explains how to retrieve all results from a SPARQL query using pagination. Often SPARQL queries can return more than 10.000 results, but due to limitations the result set will only consist out of the first 10.000 results. To retrieve more than 10.000 results you can use pagination. TriplyDB supports two methods to retrieve all results from a SPARQL query. Pagination with the saved query API or Pagination with TriplyDB.js. Pagination with the saved query API \u00b6 Each TriplyDB instance has a fully RESTful API. The TriplyDB RESTful API is extended for saved SPARQL queries. The API for saved queries is extended with two arguments that the query is able to process paginated result sets. The arguments are \u2018page\u2019 and \u2018pageSize\u2019. An example of a paginated saved SPARQL query request would look like: https://api.triplydb.com/queries/academy/pokemon-color/run?page=3&pageSize=100 The example request argument \u2018page\u2019 corresponds to the requested page. In the example request this would correspond to the third page of paginated SPARQL query, according to the \u2018pageSize\u2019. There is no maximum \u2018page\u2019 limit, as a SPARQL query could return an arbitrary number of results. When no results can be retrieved for the requested page an empty page will be returned. The argument \u2018pageSize\u2019 corresponds to how many results each page would contain. The \u2018pageSize\u2019 has a default of 100 returned results and a maximum \u2018pageSize\u2019 limit of 10.000 returned results. The request will return an error when the \u2018pageSize\u2019 is set higher than 10.000. The RESTful API for the saved SPARQL queries follows the RFC 8288 standard. The request will return an response body containing the result set and a response header. The response header contains a link header with the relative \"next\" request, the relative \"prev\" request, and the relative \"first\" request. By following the \"next\" link header request you can chain the pagination and retrieve all results. link: ; rel=\"next\", ; rel=\"prev\", ; rel=\"first\" Pagination with TriplyDB.js \u00b6 TriplyDB.js is the official programming library for interacting with TriplyDB . TriplyDB.js allows the user to connect to a TriplyDB instance via the TypeScript language. TriplyDB.js has the advantage that it can handle pagination internally so it can reliably retrieve a large number of results. To get the output for a construct or select query, follow these steps: 1. Import the TriplyDB library and set your parameters, regarding the TriplyDB instance and the account in which you have saved the query as well as the name of the query. Do not forget that we perform TriplyDB.js requests within an async context. import Client from '@triply/triplydb' async function run() { // Your code goes here. 
const client = Client.get({token: process.env.TRIPLYDB_TOKEN}) const account = await client.getAccount('account-name') const query = await account.getQuery('name-of-some-query') } run() 2. Get the results of a query by setting a results variable. More specifically, for construct queries you use the statements() call: const query = await account.getQuery('name-of-some-query') const results = query.results().statements() For select queries you use the bindings() call: const query = await account.getQuery('name-of-some-query') const results = query.results().bindings() Additionally, saved queries can have 'API variables' that allow you to specify variables that are used in the query. Thus, if you have query parameters, pass their values as the first argument to results as follows: // For SPARQL construct queries. const results = query.results({ someVariable: 'value of someVariable', anotherVariable: 'value of anotherVariable' }).statements() // For SPARQL select queries. const results = query.results({ someVariable: 'value of someVariable', anotherVariable: 'value of anotherVariable' }).bindings() 3. To iterate the results of your SPARQL query you have three options: a. Iterate through the results per row in a for -loop: // Iterating over the results. for await (const row of results) { // execute something } Note: For select queries the for -loop iterates over the rows of the result set. For construct queries the for -loop iterates over the statements in the result set. b. Save the results to a file. This is only supported for SPARQL construct queries: // Saving the results of a SPARQL construct query to a file. await results.toFile('my-file.nt') c. Load all results into memory in the form of an Array. Note that this is almost never used. If you want to process results, then use the 3a option; if you want to persist results, then option 3b suits better. // Loading results for a SPARQL construct or SPARQL select query into memory. const array = await results.toArray() Using a saved query as RESTful API \u00b6 Each TriplyDB instance has a fully RESTful API. The TriplyDB RESTful API is extended for saved SPARQL queries. A saved query can be used as a RESTful API to retrieve data from your linked dataset. The URL next to the keywork API is the RESTful API URL and can be used with RESTful API libraries. You can copy the RESTful API by pressing the copy button just behind the URL. Pressing the copy button from the above query will result in the following run url: https://api.triplydb.com/queries/DBpedia-association/timeline-cars/run When you copy this URL in your browser or fetch the URL with curl, you will get a get request to a RESTful API and get a JSON representation of the data in your browser or command window. Using a saved query in Python or R notebooks \u00b6 SPARQL queries as a RESTful API, also means you can transport your data to your Python script, R script or Jupyter notebook. To use the result set from your SPARQL query you need to connect your script to the saved SPARQL query. To do this you will need to write a small connector. To help you out TriplyDB has added a code snippet generator for Python and R. This snippet contains the code to retrieve the data from the SPARQL query into your script or notebook. You can open the code snippet generator by clicking on the '' button on the right side of the screen. Clicking the '' button opens the code snippet screen. Here you select the snippet in the language you want to have, either Python or R. 
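Such a connector can also be written directly in TypeScript. The sketch below combines the pieces described above: it calls the saved query's run URL, pages through the results with the page and pageSize arguments, and adds an Authorization header when a TRIPLYDB_TOKEN environment variable is set (needed for private or internal queries). It assumes a select query whose run URL returns a JSON array of rows; following the RFC 8288 "next" link header is an equivalent alternative.

```typescript
// Sketch: retrieve all rows of a saved select query over the RESTful API,
// using the 'page' and 'pageSize' arguments described above. Assumes Node.js
// 18+ (built-in fetch) and a run URL that returns a JSON array of rows.
async function fetchAllRows(runUrl: string): Promise<unknown[]> {
  const rows: unknown[] = []
  const pageSize = 100 // default is 100, the maximum is 10 000
  for (let page = 1; ; page++) {
    const headers: Record<string, string> = {}
    if (process.env.TRIPLYDB_TOKEN) {
      // Only needed for private or internal queries.
      headers.Authorization = `Bearer ${process.env.TRIPLYDB_TOKEN}`
    }
    const response = await fetch(`${runUrl}?page=${page}&pageSize=${pageSize}`, { headers })
    if (!response.ok) throw new Error(`Request failed with status ${response.status}`)
    const pageRows = (await response.json()) as unknown[]
    rows.push(...pageRows)
    if (pageRows.length < pageSize) break // an empty or short page means there are no further results
  }
  return rows
}

fetchAllRows('https://api.triplydb.com/queries/academy/pokemon-color/run')
  .then(rows => console.log(`Retrieved ${rows.length} rows`))
  .catch(error => console.error(error))
```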
You can then copy the snippet, by clicking the 'copy to clipboard' button or selecting the snippet and pressing ctrl-c . Now you can paste the code in the location you want to use the data. The data is stored in the data variable in JSON format. When the SPARQL query is not public, but instead either private or internal, you will need to add an authorization header to the get request. Without the authorization header the request will return an incorrect response. Checkout Creating your API token about creating your API-token for the authorization header. Check out the SPARQL pagination page when you want to query a SPARQL query that holds more than 10.000 results. The SPARQL pagination page will explain how you can retrieve the complete set. Query metadata \u00b6 Every Saved Query has a metadata section. This metadata section includes the following two links: A link to the dataset over which the query is executed. Clicking this links navigates to the dataset homepage. A link to the service by which the query is executed. Clicking this link navigates to the services page that includes that service. Users can specify a query title and description, both of which are included as metadata. The access level and version of the query are also exposed as metadata. See the following screenshot for how the metadata fields are shown in TriplyDB: Users can specify additional metadata inside the query string, by using the GRLC annotation format. GRLC annotations start with the hash and plus sign characters ( #+ ). Visit the GRLC project to learn more about this format. For example, the following GRLC annotation could indicate to a software application that the query should be repeated every hour: #+ frequency: hourly See the Triply API documentation for how to retrieve query metadata, including how to retrieve GRLC annotations.","title":"Saved Queries"},{"location":"triply-db-getting-started/saved-queries/#saved-queries","text":"A Saved Query is a versioned SPARQL query with its own URL. Using this URL, users are able to view any version of the query and its results. It can also be used to run the query and retrieve the results from a browser or a program, removing the hassle of figuring out how to run a SPARQL query.","title":"Saved Queries"},{"location":"triply-db-getting-started/saved-queries/#how-to-save-a-query","text":"There are two ways to create a saved query. You need to be logged in and have authorization rights on the dataset to use this feature When working from the SPARQL IDE Using the Saved Queries tab in a dataset Creating a saved query with the SPARQL IDE is done by writing a query/visualization and hitting the save button","title":"How to save a query"},{"location":"triply-db-getting-started/saved-queries/#creating-a-new-version","text":"Updating the saved query can be done by clicking a query in the Saved Queries tab and editing the query or the visualization. 
Hit the save button to save it as a new version.","title":"Creating a new version"},{"location":"triply-db-getting-started/saved-queries/#deleting-a-saved-query","text":"If you want to delete a saved query, you can do so by clicking the three dots on the top right corner of the query, as shown in the image below, and then clicking Delete .","title":"Deleting a saved query"},{"location":"triply-db-getting-started/saved-queries/#using-a-saved-query","text":"","title":"Using a saved query"},{"location":"triply-db-getting-started/saved-queries/#sharing-a-saved-query","text":"To share a saved query, for example in Data Stories , you can copy the link that is used when you open the query in TriplyDB. Let's say you have a query called Timelined-Cars-BETA in the dataset core under the account dbpedia and you want to use version 9. Then the following link would be used: https://triplydb.com/DBpedia-association/-/queries/timeline-cars/9 If you want to always use the latest query, you can simply omit the version number like so: https://triplydb.com/DBpedia-association/-/queries/timeline-cars","title":"Sharing a saved query"},{"location":"triply-db-getting-started/saved-queries/#downloading-a-query-result","text":"The result of a query can be downloaded via the TriplyDB interface. After saving the query, open it in TriplyDB. e.g. https://triplydb.com/DBpedia-association/-/queries/timeline-cars/ . You can download results in different data format, depending on which visualization option you use. For example, if you want to download the results in a .json format, you can choose the option Response and click on the download icon or scroll down and click on Download result . The downloaded file is automatically stored in the Downloads -folder and has the name of the query. In our example, the file is called timeline-cars.json . The downloaded file contains the query result as a json-object. TriplyDB also displays the json-object when selecting the option Response . Below is a table of all supported visualizations and what format of results they produce. Visualization option Result data format Table .csv Response .json Gallery Download not supported Chart .svg Geo Download not supported Geo-3D Download not supported Geo events Download not supported Markup .svg , .html Network .png Timeline Download not supported As another example, to download the query result in CSV-format, select the option Table and click on the download icon. The downloaded file is named after the query with the suffix .csv .","title":"Downloading a query result"},{"location":"triply-db-getting-started/saved-queries/#download-more-than-10-000-query-results-sparql-pagination","text":"This section explains how to retrieve all results from a SPARQL query using pagination. Often SPARQL queries can return more than 10.000 results, but due to limitations the result set will only consist out of the first 10.000 results. To retrieve more than 10.000 results you can use pagination. TriplyDB supports two methods to retrieve all results from a SPARQL query. Pagination with the saved query API or Pagination with TriplyDB.js.","title":"Download more than 10 000 query results - SPARQL pagination"},{"location":"triply-db-getting-started/saved-queries/#pagination-with-the-saved-query-api","text":"Each TriplyDB instance has a fully RESTful API. The TriplyDB RESTful API is extended for saved SPARQL queries. The API for saved queries is extended with two arguments that the query is able to process paginated result sets. 
The arguments are \u2018page\u2019 and \u2018pageSize\u2019. An example of a paginated saved SPARQL query request would look like: https://api.triplydb.com/queries/academy/pokemon-color/run?page=3&pageSize=100 The example request argument \u2018page\u2019 corresponds to the requested page. In the example request this would correspond to the third page of paginated SPARQL query, according to the \u2018pageSize\u2019. There is no maximum \u2018page\u2019 limit, as a SPARQL query could return an arbitrary number of results. When no results can be retrieved for the requested page an empty page will be returned. The argument \u2018pageSize\u2019 corresponds to how many results each page would contain. The \u2018pageSize\u2019 has a default of 100 returned results and a maximum \u2018pageSize\u2019 limit of 10.000 returned results. The request will return an error when the \u2018pageSize\u2019 is set higher than 10.000. The RESTful API for the saved SPARQL queries follows the RFC 8288 standard. The request will return an response body containing the result set and a response header. The response header contains a link header with the relative \"next\" request, the relative \"prev\" request, and the relative \"first\" request. By following the \"next\" link header request you can chain the pagination and retrieve all results. link: ; rel=\"next\", ; rel=\"prev\", ; rel=\"first\"","title":"Pagination with the saved query API"},{"location":"triply-db-getting-started/saved-queries/#pagination-with-triplydbjs","text":"TriplyDB.js is the official programming library for interacting with TriplyDB . TriplyDB.js allows the user to connect to a TriplyDB instance via the TypeScript language. TriplyDB.js has the advantage that it can handle pagination internally so it can reliably retrieve a large number of results. To get the output for a construct or select query, follow these steps: 1. Import the TriplyDB library and set your parameters, regarding the TriplyDB instance and the account in which you have saved the query as well as the name of the query. Do not forget that we perform TriplyDB.js requests within an async context. import Client from '@triply/triplydb' async function run() { // Your code goes here. const client = Client.get({token: process.env.TRIPLYDB_TOKEN}) const account = await client.getAccount('account-name') const query = await account.getQuery('name-of-some-query') } run() 2. Get the results of a query by setting a results variable. More specifically, for construct queries you use the statements() call: const query = await account.getQuery('name-of-some-query') const results = query.results().statements() For select queries you use the bindings() call: const query = await account.getQuery('name-of-some-query') const results = query.results().bindings() Additionally, saved queries can have 'API variables' that allow you to specify variables that are used in the query. Thus, if you have query parameters, pass their values as the first argument to results as follows: // For SPARQL construct queries. const results = query.results({ someVariable: 'value of someVariable', anotherVariable: 'value of anotherVariable' }).statements() // For SPARQL select queries. const results = query.results({ someVariable: 'value of someVariable', anotherVariable: 'value of anotherVariable' }).bindings() 3. To iterate the results of your SPARQL query you have three options: a. Iterate through the results per row in a for -loop: // Iterating over the results. 
for await (const row of results) { // execute something } Note: For select queries the for -loop iterates over the rows of the result set. For construct queries the for -loop iterates over the statements in the result set. b. Save the results to a file. This is only supported for SPARQL construct queries: // Saving the results of a SPARQL construct query to a file. await results.toFile('my-file.nt') c. Load all results into memory in the form of an Array. Note that this is almost never used. If you want to process results, then use the 3a option; if you want to persist results, then option 3b suits better. // Loading results for a SPARQL construct or SPARQL select query into memory. const array = await results.toArray()","title":"Pagination with TriplyDB.js"},{"location":"triply-db-getting-started/saved-queries/#using-a-saved-query-as-restful-api","text":"Each TriplyDB instance has a fully RESTful API. The TriplyDB RESTful API is extended for saved SPARQL queries. A saved query can be used as a RESTful API to retrieve data from your linked dataset. The URL next to the keywork API is the RESTful API URL and can be used with RESTful API libraries. You can copy the RESTful API by pressing the copy button just behind the URL. Pressing the copy button from the above query will result in the following run url: https://api.triplydb.com/queries/DBpedia-association/timeline-cars/run When you copy this URL in your browser or fetch the URL with curl, you will get a get request to a RESTful API and get a JSON representation of the data in your browser or command window.","title":"Using a saved query as RESTful API"},{"location":"triply-db-getting-started/saved-queries/#using-a-saved-query-in-python-or-r-notebooks","text":"SPARQL queries as a RESTful API, also means you can transport your data to your Python script, R script or Jupyter notebook. To use the result set from your SPARQL query you need to connect your script to the saved SPARQL query. To do this you will need to write a small connector. To help you out TriplyDB has added a code snippet generator for Python and R. This snippet contains the code to retrieve the data from the SPARQL query into your script or notebook. You can open the code snippet generator by clicking on the '' button on the right side of the screen. Clicking the '' button opens the code snippet screen. Here you select the snippet in the language you want to have, either Python or R. You can then copy the snippet, by clicking the 'copy to clipboard' button or selecting the snippet and pressing ctrl-c . Now you can paste the code in the location you want to use the data. The data is stored in the data variable in JSON format. When the SPARQL query is not public, but instead either private or internal, you will need to add an authorization header to the get request. Without the authorization header the request will return an incorrect response. Checkout Creating your API token about creating your API-token for the authorization header. Check out the SPARQL pagination page when you want to query a SPARQL query that holds more than 10.000 results. The SPARQL pagination page will explain how you can retrieve the complete set.","title":"Using a saved query in Python or R notebooks"},{"location":"triply-db-getting-started/saved-queries/#query-metadata","text":"Every Saved Query has a metadata section. This metadata section includes the following two links: A link to the dataset over which the query is executed. Clicking this links navigates to the dataset homepage. 
A link to the service by which the query is executed. Clicking this link navigates to the services page that includes that service. Users can specify a query title and description, both of which are included as metadata. The access level and version of the query are also exposed as metadata. See the following screenshot for how the metadata fields are shown in TriplyDB: Users can specify additional metadata inside the query string, by using the GRLC annotation format. GRLC annotations start with the hash and plus sign characters ( #+ ). Visit the GRLC project to learn more about this format. For example, the following GRLC annotation could indicate to a software application that the query should be repeated every hour: #+ frequency: hourly See the Triply API documentation for how to retrieve query metadata, including how to retrieve GRLC annotations.","title":"Query metadata"},{"location":"triply-db-getting-started/uploading-data/","text":"On this page: Uploading Data Creating a new dataset Opening the \u201cCreate dataset\u201d dialog Inside the \u201cCreate dataset\u201d dialog Adding data Opening the \u201cAdd data\u201d pane Inside the \u201cAdd data\u201d pane Add data from an existing dataset Add data from URL Add data from files Supported data formats Adding malformed data Assets: binary data Uploading Data \u00b6 This section explains how to create a linked dataset in TriplyDB. Creating a new dataset \u00b6 You must be logged in before you can create a new dataset. Opening the \u201cCreate dataset\u201d dialog \u00b6 You can create a new dataset in either of the following two ways: From the home screen (see Figure 1a ), click on the + button next to \"Your datasets\", on the right-hand side of the screen. From the user screen (see Figure 1b ), click on the \u201cCreate dataset\u201d button on the right-hand side. Figure 1a. The home screen for a logged in user. Figure 1b. The user screen for a logged in user. Inside the \u201cCreate dataset\u201d dialog \u00b6 This opens the \u201cCreate dataset\u201d dialog (see Figure 2 ). Figure 2. The \u201cCreate dataset\u201d dialog In the \u201cCreate dataset\u201d dialog, perform the following steps: Required: Enter a dataset name. A dataset name can contain letters, number, and hyphens. Optional: Enter a dataset display name. The display name will be shown in the GUI and will be included in dataset metadata. Optional: Enter a dataset description. This description will be shown in the GUI, and will be included in dataset metadata. The description can be formatted with Markdown. See Section Markdown for details. Optional: Change the access level of the dataset. The standard access level is \u201cPrivate\u201d. See Section Dataset Access Levels for more information. This creates a new dataset, and displays the \u201cAdd data\u201d page (see Section Adding data ). Adding data \u00b6 You must first have a dataset, before you can add data. See Section Creating a new dataset for more information. Opening the \u201cAdd data\u201d pane \u00b6 You can open the \u201cAdd data\u201d pane in either of the following two ways: From the Graphs page, click on the \"Import a new graph\" button (see Figure 3a ). This opens the \"Add data\" pane. When a dataset does not have any data yet, a message is displayed on the dataset homepage (see Figure 3b ) that can be clicked. This opens the \"Add data\" pane. After creating a new dataset, the \"Add data\" pane is automatically opened. Figure 3a. The Graphs page of a dataset. Figure 3b. 
The Graphs page of a dataset. Inside the \u201cAdd data\u201d pane \u00b6 The \u201cAdd data\u201d pane is now displayed (see Figure 4 ). Figure 4. The \u201cAdd data\u201d pane. In the \"Add data\" pane, choose one of the following approaches for adding data: \"Add data from an existing dataset\" Search for data from a dataset that you have access to in the TriplyDB system. After you have found a dataset, you can choose which graphs to add. See Section Add data from an existing dataset for more details. \"Add data from URL\" Enter a URL to a data file that is published online. The URL must be publicly accessible. The URL must point to a file that contains RDF or CSV data. See Section Add data from a URL for more details. \"Add data from files\" Click the cloud icon to open a file explorer window, in which you can select one or more files from your computer. Alternatively, drag-and-drop the local files from your computer onto the cloud icon with the upward pointing arrow. Files must contain RDF or CSV data. See Section Add data from files for more details. Add data from an existing dataset \u00b6 The first option for adding data is to add it from datasets that are already published in the same TriplyDB instance. This is done with the \u201cAdd data from an existing dataset\u201d field. By typing in this field, a dropdown list of existing datasets is shown (see Figure 5 ). Figure 5. The dropdown list that shows existing datasets. Once the correct dataset appears in the dropdown list, click it to select it. This will open the \"Import from dataset\" pane (see Figure 6 ). You can choose which graphs to import from the existing dataset. Click \"Import graphs\" to start importing from an existing dataset. Moments later, the graphs are added to your dataset. Figure 6. The \"Import from dataset\" pane. Add data from URL \u00b6 The second option for adding data is to add it from an online URL. This is done by entering the URL inside the \u201cAdd data from a URL\u201d text field (see Figure 7 ). After you have entered the URL, click the orange button on the right to start adding data. The data is now being downloaded to your dataset. How long this takes depends on the size of the data and the speed of the remote server where the data is retrieved from. Figure 7. The \"Add data from URL\" field. Only URLs that contain supported data formats will be added. See Section Supported data formats for more information. Add data from files \u00b6 The third option for adding data is to add it from files that are on your computer. This can be done in two ways: Click the cloud icon to open a file finder dialog. Here you can select one or more files from your computer (see Figure 8 ). The file finder dialog that opens, depends on your Operating System. In Figure 8 , the Windows file finder dialog is shown. Drag-and-drop one or more files from your computer onto the cloud icon with the upward pointing arrow. Figure 8. The file finder dialog that is opened when adding data from files. After you have added one of more files, a list of uploaded appears (see Figure 9 ). You can add or remove more files, until you have the complete upload job configured. Once the list of files is complete, you can click \"Import from files\" to start adding data from files. How long this takes depends on the size of the data. Figure 9. The list of uploaded files in the \"Add data from files\" pane. Only files that contain supported data formats will be added. See Section Supported data formats for more information. 
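The "Add data" pane also has a programmatic counterpart in TriplyDB.js, the library used in the saved-query examples elsewhere in this documentation. The sketch below is indicative only: the getDataset, importFromFiles, and importFromUrls method names and signatures are assumptions that may differ between TriplyDB.js versions, and the account, dataset, and file names are placeholders.

```typescript
import Client from '@triply/triplydb'

// Sketch: adding data to an existing dataset with TriplyDB.js instead of the
// "Add data" pane. The getDataset/importFromFiles/importFromUrls calls are
// assumptions that may differ per TriplyDB.js version; names are placeholders.
async function run() {
  const client = Client.get({ token: process.env.TRIPLYDB_TOKEN })
  const account = await client.getAccount('account-name')
  const dataset = await account.getDataset('dataset-name')

  // Add data from local files (RDF or CSV; see "Supported data formats" below).
  await dataset.importFromFiles(['./data.ttl', './more-data.trig'])

  // Or add data from a publicly accessible URL.
  await dataset.importFromUrls(['https://example.org/data.nt'])
}
run()
```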
Supported data formats \u00b6 Files must contain RDF and/or CSV data, and must use one of the supported file name extensions: Data Format File name extension Comma-Separated Values (CSV) .csv JSON-LD .jsonld , .json N-Quads .nq N-Triples .nt RDF/XML .rdf , .owl , .owx TriG .trig Turtle .ttl , .n3 It is possible to upload up to 1,000 separate files in this way. When you have a lot of files and/or large files, it is better to compress them into an archive format. This allows an any number of files of any size to be uploaded. The following archive/compression formats are supported: Archive format File name extension gzip .gz bzip2 .bz2 tar tar XZ .xz ZIP .zip Adding malformed data \u00b6 TriplyDB only allows valid RDF data to be added. If data is malformed, TriplyDB will show an error message that indicates which part of the RDF data is malformed (see screenshot). If such malformed data is encountered, the RDF file must first be corrected and uploaded again. TriplyDB follows the linked data standards strictly. Many triple stores allow incorrect RDF data to be added. This may seem convenient during the loading phase, but often results in errors when standards-compliant tools start using the data. Assets: binary data \u00b6 Not all data can be stored as RDF data. For example images and video files use a binary format. Such files can also be stored in TriplyDB and can be integrated into the Knowledge Graph.","title":"Uploading Data"},{"location":"triply-db-getting-started/uploading-data/#uploading-data","text":"This section explains how to create a linked dataset in TriplyDB.","title":"Uploading Data"},{"location":"triply-db-getting-started/uploading-data/#creating-a-new-dataset","text":"You must be logged in before you can create a new dataset.","title":"Creating a new dataset"},{"location":"triply-db-getting-started/uploading-data/#opening-the-create-dataset-dialog","text":"You can create a new dataset in either of the following two ways: From the home screen (see Figure 1a ), click on the + button next to \"Your datasets\", on the right-hand side of the screen. From the user screen (see Figure 1b ), click on the \u201cCreate dataset\u201d button on the right-hand side. Figure 1a. The home screen for a logged in user. Figure 1b. The user screen for a logged in user.","title":"Opening the \u201cCreate dataset\u201d dialog"},{"location":"triply-db-getting-started/uploading-data/#inside-the-create-dataset-dialog","text":"This opens the \u201cCreate dataset\u201d dialog (see Figure 2 ). Figure 2. The \u201cCreate dataset\u201d dialog In the \u201cCreate dataset\u201d dialog, perform the following steps: Required: Enter a dataset name. A dataset name can contain letters, number, and hyphens. Optional: Enter a dataset display name. The display name will be shown in the GUI and will be included in dataset metadata. Optional: Enter a dataset description. This description will be shown in the GUI, and will be included in dataset metadata. The description can be formatted with Markdown. See Section Markdown for details. Optional: Change the access level of the dataset. The standard access level is \u201cPrivate\u201d. See Section Dataset Access Levels for more information. This creates a new dataset, and displays the \u201cAdd data\u201d page (see Section Adding data ).","title":"Inside the \u201cCreate dataset\u201d dialog"},{"location":"triply-db-getting-started/uploading-data/#adding-data","text":"You must first have a dataset, before you can add data. 
See Section Creating a new dataset for more information.","title":"Adding data"},{"location":"triply-db-getting-started/uploading-data/#opening-the-add-data-pane","text":"You can open the \u201cAdd data\u201d pane in either of the following two ways: From the Graphs page, click on the \"Import a new graph\" button (see Figure 3a ). This opens the \"Add data\" pane. When a dataset does not have any data yet, a message is displayed on the dataset homepage (see Figure 3b ) that can be clicked. This opens the \"Add data\" pane. After creating a new dataset, the \"Add data\" pane is automatically opened. Figure 3a. The Graphs page of a dataset. Figure 3b. The Graphs page of a dataset.","title":"Opening the \u201cAdd data\u201d pane"},{"location":"triply-db-getting-started/uploading-data/#inside-the-add-data-pane","text":"The \u201cAdd data\u201d pane is now displayed (see Figure 4 ). Figure 4. The \u201cAdd data\u201d pane. In the \"Add data\" pane, choose one of the following approaches for adding data: \"Add data from an existing dataset\" Search for data from a dataset that you have access to in the TriplyDB system. After you have found a dataset, you can choose which graphs to add. See Section Add data from an existing dataset for more details. \"Add data from URL\" Enter a URL to a data file that is published online. The URL must be publicly accessible. The URL must point to a file that contains RDF or CSV data. See Section Add data from a URL for more details. \"Add data from files\" Click the cloud icon to open a file explorer window, in which you can select one or more files from your computer. Alternatively, drag-and-drop the local files from your computer onto the cloud icon with the upward pointing arrow. Files must contain RDF or CSV data. See Section Add data from files for more details.","title":"Inside the \u201cAdd data\u201d pane"},{"location":"triply-db-getting-started/uploading-data/#add-data-from-an-existing-dataset","text":"The first option for adding data is to add it from datasets that are already published in the same TriplyDB instance. This is done with the \u201cAdd data from an existing dataset\u201d field. By typing in this field, a dropdown list of existing datasets is shown (see Figure 5 ). Figure 5. The dropdown list that shows existing datasets. Once the correct dataset appears in the dropdown list, click it to select it. This will open the \"Import from dataset\" pane (see Figure 6 ). You can choose which graphs to import from the existing dataset. Click \"Import graphs\" to start importing from an existing dataset. Moments later, the graphs are added to your dataset. Figure 6. The \"Import from dataset\" pane.","title":"Add data from an existing dataset"},{"location":"triply-db-getting-started/uploading-data/#add-data-from-url","text":"The second option for adding data is to add it from an online URL. This is done by entering the URL inside the \u201cAdd data from a URL\u201d text field (see Figure 7 ). After you have entered the URL, click the orange button on the right to start adding data. The data is now being downloaded to your dataset. How long this takes depends on the size of the data and the speed of the remote server where the data is retrieved from. Figure 7. The \"Add data from URL\" field. Only URLs that contain supported data formats will be added. 
See Section Supported data formats for more information.","title":"Add data from URL"},{"location":"triply-db-getting-started/uploading-data/#add-data-from-files","text":"The third option for adding data is to add it from files that are on your computer. This can be done in two ways: Click the cloud icon to open a file finder dialog. Here you can select one or more files from your computer (see Figure 8 ). The file finder dialog that opens, depends on your Operating System. In Figure 8 , the Windows file finder dialog is shown. Drag-and-drop one or more files from your computer onto the cloud icon with the upward pointing arrow. Figure 8. The file finder dialog that is opened when adding data from files. After you have added one of more files, a list of uploaded appears (see Figure 9 ). You can add or remove more files, until you have the complete upload job configured. Once the list of files is complete, you can click \"Import from files\" to start adding data from files. How long this takes depends on the size of the data. Figure 9. The list of uploaded files in the \"Add data from files\" pane. Only files that contain supported data formats will be added. See Section Supported data formats for more information.","title":"Add data from files"},{"location":"triply-db-getting-started/uploading-data/#supported-data-formats","text":"Files must contain RDF and/or CSV data, and must use one of the supported file name extensions: Data Format File name extension Comma-Separated Values (CSV) .csv JSON-LD .jsonld , .json N-Quads .nq N-Triples .nt RDF/XML .rdf , .owl , .owx TriG .trig Turtle .ttl , .n3 It is possible to upload up to 1,000 separate files in this way. When you have a lot of files and/or large files, it is better to compress them into an archive format. This allows an any number of files of any size to be uploaded. The following archive/compression formats are supported: Archive format File name extension gzip .gz bzip2 .bz2 tar tar XZ .xz ZIP .zip","title":"Supported data formats"},{"location":"triply-db-getting-started/uploading-data/#adding-malformed-data","text":"TriplyDB only allows valid RDF data to be added. If data is malformed, TriplyDB will show an error message that indicates which part of the RDF data is malformed (see screenshot). If such malformed data is encountered, the RDF file must first be corrected and uploaded again. TriplyDB follows the linked data standards strictly. Many triple stores allow incorrect RDF data to be added. This may seem convenient during the loading phase, but often results in errors when standards-compliant tools start using the data.","title":"Adding malformed data"},{"location":"triply-db-getting-started/uploading-data/#assets-binary-data","text":"Not all data can be stored as RDF data. For example images and video files use a binary format. Such files can also be stored in TriplyDB and can be integrated into the Knowledge Graph.","title":"Assets: binary data"},{"location":"triply-db-getting-started/viewing-data/","text":"On this page: Viewing Data Linked Data Browser Types Labels Descriptions Geo Images Audio Video Linked Data Table SPARQL IDE Saving a SPARQL query Sharing a SPARQL query Transfer a SPARQL query Copy a SPARQL query ElasticSearch GraphQL Insights Class frequency Class hierarchy When does the class hierarchy show? Viewing Data \u00b6 TriplyDB offers several ways to explore your datasets. Linked Data Browser \u00b6 The linked data browser offers to traverse the data by focusing on one node at the time. 
The node is described using its properties, which can be followed to other nodes in the graph. The following properties provide additional information about your linked data, enabling the LD-browser to display visualizations and provide a better user experience. Types \u00b6 The predicate rdf:type allows you to specify the type or class of a resource in your linked data: By using rdf:type , you can indicate the category or classification of the resource, which can help the LD-browser understand the nature of the data and display it appropriately. In the example below , you can see that \"Iris setosa\" is of the type flowering plant due to the usage of the rdf:type property. Labels \u00b6 Labels are typically used to provide a concise and meaningful title or display name for a resource, making it easier for users to understand the content of your linked data. These predicates allow you to provide human-readable labels or names for your resources: The property rdfs:label The property skos:prefLabel In the example below , the rdfs:label property was used to denote the label (name) of the Pokemon, resulting in the display of \"Pikachu\" above its corresponding image. Descriptions \u00b6 Descriptions can provide additional context or information about a resource, helping users understand its purpose, content, or significance. These predicates allow you to provide textual descriptions or comments about your resources: The property sdo:description The property rdfs:comment In the following example, rdfs:comment was used to provide additional information on Iris Setosa. Geo \u00b6 These are some of the predicates used for representing geographic information in your LD-browser: The property geo:asWKT : This property allows you to specify the geometries of geographic features using the Well-Known Text (WKT) format, which can be visualized on a map in the LD-browser. The property geo:hasGeometry : This property is used to link a geographic feature with its corresponding geometry. The property geo:location : This property is used to indicate the location of a resource using geographic coordinates, such as latitude and longitude, in your linked data. In the following example, the geo:hasGeometry property was used to showcase a map depicting the location of Instituut voor Beeld en Geluid. Images \u00b6 These predicates allow you to associate images or visual representations with your resources: Class sdo:ImageObject The property foaf:depiction The property foaf:thumbnail The property foaf:img The property sdo:image The property sdo:contentUrl By using these predicates, you can provide URLs or references to images that can be displayed alongside your linked data in the LD-browser. In the example below , foaf:depiction was used to display a picture of Pikachu in the LD-browser: Audio \u00b6 These predicates allow you to associate audio content with your resources: The class sdo:AudioObject The property sdo:audio The property sdo:contentUrl You can use these predicates to provide URLs or references to audio files that can be played or streamed within the LD-browser. In the following example , sdo:audio was used to showcase the audio content of the Carnival Festival within the LD-browser.
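As an illustration of how these display-related properties might be produced, the following sketch uses the RATT assertion functions that are documented later in this document ( fromJson() , pairs() , and iri() ). The record keys id , name , and image , as well as the example URLs, are hypothetical; the predicates are written as full IRIs so that no vocabulary declarations are needed. With data like this, the LD-browser can show \"Pikachu\" as the label and render the linked image.
fromJson([{
  id: 'https://example.com/id/pikachu',
  name: 'Pikachu',
  image: 'https://example.com/pikachu.png',
}]),
pairs(iri('id'),
  // rdfs:label provides the human-readable name shown above the image.
  ['http://www.w3.org/2000/01/rdf-schema#label', 'name'],
  // foaf:depiction links the resource to an image that the LD-browser can display.
  ['http://xmlns.com/foaf/0.1/depiction', iri('image')],
),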
Video \u00b6 These predicates allow you to associate video content with your resources: Class sdo:VideoObject Property sdo:video Property sdo:contentUrl You can use these predicates to provide URLs or references to video files that can be played or streamed within the LD-browser. The video formats that are included in this dataset are \".mp4\", \".webm\", and \".ogg\". In the following example , sdo:contentUrl was used to showcase the video content of the Kleine Piep within the LD-browser. Linked Data Table \u00b6 The linked data Table shows a dataset at the triple level. The first three columns represent the subject, predicate, and object position of the triple. The fourth column represents the graph to which the triple belongs. The linked data Table can be used to perform simple queries by filling in the subject, predicate, object, and/or graph using the text field. Terms in the linked data Table can be dragged and dropped between columns. This allows a simple form of graph navigation. For example, an object term can be dragged to the subject column in order to show the triples in which that term appears in the subject position. Queries in the linked data Table can also be performed automatically through the Statements API and the Triple Pattern Fragments API . SPARQL IDE \u00b6 When a dataset has a running SPARQL service, the data can be queried from the SPARQL IDE. The SPARQL IDE is an extended version of the Open Source Yasgui query editor. Saving a SPARQL query \u00b6 It is often useful to save a SPARQL query for later use. This is achieved by clicking on the save icon in the top-right corner of the SPARQL Editor. Doing so will create a Saved Query . Sharing a SPARQL query \u00b6 It is sometimes useful to share a SPARQL query with somebody else, or to have a cURL command that can be used to run the same SPARQL query from a command line. This is achieved by clicking on the share icon in the top-right corner of the SPARQL Editor. This brings up a dialog from which the SPARQL query can be copied in the following three forms: The URL-encoded SPARQL query. This is a long URL that includes the endpoint, the query, and visualization settings. Notice that this URL can be quite long for complex queries and/or visualizations. Long URLs are not supported by some applications, which cut off a URL after a maximum length (often 1,024 characters). Use one of the other two options or use Saved Queries to avoid such restrictions. A short URL that redirects to the full URL-encoded SPARQL query. A cURL command that can be copy/pasted into a terminal application that supports this command. cURL is often used by programmers to test HTTP(S) requests. Saved Queries are a more modern way of sharing SPARQL queries. They do not have any of the technical limitations that occur with URL-encoded queries. Transfer a SPARQL query \u00b6 SPARQL queries can be transferred to another account or an organization. To do that, go to the settings field on the query page and choose transfer: and then choose where the SPARQL query should be moved to: After the destination is set, you will be redirected to the new page of the SPARQL query. A SPARQL query can be transferred from an account to an organization and vice versa. Copy a SPARQL query \u00b6 Users can copy SPARQL queries to another account or an organization.
To do that, click on three dots in the upper right corner of the query and choose the copy option: Then, choose where the SPARQL query should be moved to: After setting the destination, you will be redirected to the SPARQL query new page. The SPARQL query can be copied from an account to an organization and vice versa. ElasticSearch \u00b6 When a dataset has a running Elasticsearch service, textual searches can be performed over the entire dataset. Text search with Elasticsearch works like a search engine and returns any node that contains your search term, or contains the search term in any of its properties. It is also possible to write a custom query using the Elasticsearch Query DSL (Domain Specific Language) . GraphQL \u00b6 Some TriplyDB instances also support querying using the GraphQL language . For more information on the schema and possible queries, also read this document . Insights \u00b6 The insights page has been developed to give you a succinct overview of the linked data at hand. It holds two views: the class frequency and the class hierarchy view. Class frequency \u00b6 The class frequency diagram shows how often classes and properties appear in a graph. The drop-down on the top of the visualization selects the graph for which the class frequency is drawn. The visualization shows the 10 most frequent classes in the selected graph. The exact number of occurrences can be seen when hovering over the bar of a class, also showing the complete IRI/prefixed IRI. When clicking on the bar of a class the node will expand and show the 10 most frequent predicates of that class. Class hierarchy \u00b6 The class hierarchy diagram shows the hierarchy of the dataset in three different visualizations. Each of the diagrams are created by the rdfs:subClassOf relations and the classes in the dataset. TriplyDB has three different visualization methods for the classHierarchy: Bubbles visualization Treemap visualization Sunburst visualization All three visualizations are interactive in two ways. It is possible to hover over them, which will show information about the layer the mouse is on, or to click on them, so the visualization zooms in one or more layers. For each visualization it is also possible to zoom out: Bubbles visualization: click the outside of the bubble Treemap visualization: use the breadcrumbs trail shown above the visualization Sunburst visualization: click the innermost circle of the visualization When does the class hierarchy show? \u00b6 A class only appears in the class hierarchy tab if it has instances (connected to the class via rdf:type ). The class hierarchy cannot be shown if it contains a cycle, meaning that some class is (indirectly) its own subclass.","title":"Viewing Data"},{"location":"triply-db-getting-started/viewing-data/#viewing-data","text":"TriplyDB offers several ways to explore your datasets.","title":"Viewing Data"},{"location":"triply-db-getting-started/viewing-data/#linked-data-browser","text":"The linked data browser offers to traverse the data by focusing on one node at the time. The node is describe using it's properties, which can be followed to other nodes in the graph. 
The following properties provide additional information about your linked data, enabling the LD-browser to display visualizations and provide a better user experience.","title":"Linked Data Browser"},{"location":"triply-db-getting-started/viewing-data/#types","text":"The predicate rdf:type allows you to specify the type or class of a resource in your linked data: By using rdf:type , you can indicate the category or classification of the resource, which can help the LD-browser understand the nature of the data and display it appropriately. In the example below , you can see that \"Iris setosa\" is the type of flowering plant due to the usage of the rdf:type property.","title":"Types"},{"location":"triply-db-getting-started/viewing-data/#labels","text":"Labels are typically used to provide a concise and meaningful title or display name for a resource, making it easier for users to understand the content of your linked data. These predicates allow you to provide human-readable labels or names for your resources: The property rdfs:label The property skos:prefLabel In the example below , the rdfs:label property was used to denote the label(name) of the Pokemon, resulting in the display of \"Pikachu\" above its corresponding image.","title":"Labels"},{"location":"triply-db-getting-started/viewing-data/#descriptions","text":"Descriptions can provide additional context or information about a resource, helping users understand its purpose, content, or significance. These predicates allow you to provide textual descriptions or comments about your resources: The property sdo:description The property rdfs:comment In the following example rdfs:comment was used to provide additional information on Iris Setosa.","title":"Descriptions"},{"location":"triply-db-getting-started/viewing-data/#geo","text":"These are some of the predicates used for representing geographic information in your LD-browser: The property geo:asWKT : This property allows you to specify the geometries of geographic features using the Well-Known Text (WKT) format, which can be visualized on a map in the LD-browser. The property geo:hasGeometry : This property is used to link a geographic feature with its corresponding geometry. The property geo:location : This property is used to indicate the location of a resource using geographic coordinates, such as latitude and longitude, in your linked data. In the following example geo:hasGeometry property was used to showcase a map depicting the location of Instituut voor Beeld en Geluid.","title":"Geo"},{"location":"triply-db-getting-started/viewing-data/#images","text":"These predicates allow you to associate images or visual representations with your resources: Class sdo:ImageObject The property foaf:depiction The property foaf:thumbnail The property foaf:img The property sdo:image The property sdo:contentUrl By using these predicates, you can provide URLs or references to images that can be displayed alongside your linked data in the LD-browser. In the example below , foaf:depiction was used to display picture of Pikachu in the LD-browser:","title":"Images"},{"location":"triply-db-getting-started/viewing-data/#audio","text":"These predicates allow you to associate audio content with your resources: The class sdo:AudioObject The property sdo:audio The property sdo:contentUrl You can use these predicates to provide URLs or references to audio files that can be played or streamed within the LD-browser. 
In the following example , sdo:audio was used to showcase audio content of the Carnival Festival within the LD-browser.","title":"Audio"},{"location":"triply-db-getting-started/viewing-data/#video","text":"These predicates allow you to associate video content with your resources: Class sdo:VideoObject Property sdo:video Property sdo:contentUrl You can use these predicates to provide URLs or references to video files that can be played or streamed within the LD-browser.The video formats that are included in this dataset are \".mp4\", \".webm\", \".ogg\". In the following example , sdo:contentUrl was used to showcase video content of the Kleine Piep within the LD-browser.","title":"Video"},{"location":"triply-db-getting-started/viewing-data/#linked-data-table","text":"The linked data Table shows a dataset at the triple level. The first three columns represent the subject, predicate, and object position of the triple. The fourth column represents the graph to which the triple belongs. The linked data Table can be used to perform simple queries by filling in the subject, predicate, object, and/or graph using the text field. Terms in the linked data Table can be dragged and dropped between columns. This allows a simple form of graph navigation. For example, an object term can be dragged to the subject column in order to show the triples in which that term appears in the subject position. Queries in the linked data Table can also be performed automatically through the Statements API and the Triple Pattern Fragments API .","title":"Linked Data Table"},{"location":"triply-db-getting-started/viewing-data/#sparql-ide","text":"When a dataset has a running SPARQL service, the data can be queried from the SPARQL IDE. The SPARQL IDE is an extended version of the Open Source Yasgui query editor.","title":"SPARQL IDE"},{"location":"triply-db-getting-started/viewing-data/#saving-a-sparql-query","text":"It is often useful to save a SPARQL query for later use. This is achieved by clicking on the save icon in the top-right corner of the SPARQL Editor. Doing so will create a Save Query .","title":"Saving a SPARQL query"},{"location":"triply-db-getting-started/viewing-data/#sharing-a-sparql-query","text":"It is sometimes useful to share a SPARQL query with somebody else, or to have a cURL command that can be used to run the same SPARQL query from a command line. This is achieved by clicking on the share icon in the top-right corner of the SPARQL Editor. This brings us a dialog from which the SPARQL query can be copied in the following three forms: The URL-encoded SPARQL query. This is a long URL that includes the endpoint, the query, and visualization settings. Notice that this URL can be quite long for complex queries and/or visualizations. Long URLs are not supported by some application that cut off a URL after a maximum length (often 1,024 characters). Use one of the other two options or use Saved Queries to avoid such restrictions. A short URL that redirects to the full URL-encoded SPARQL query. A cURL command that can be copy/pasted into a terminal application that supports this command. cURL is often used by programmers to test HTTP(S) requests. Saved Queries are a more modern way of sharing SPARQL queries. They do not have any of the technical limitations that occur with URL-encoded queries.","title":"Sharing a SPARQL query"},{"location":"triply-db-getting-started/viewing-data/#transfer-a-sparql-query","text":"The SPARQL queries could be transferred to another account or an organization. 
To do that, go to the setting field at the query page, choose transfer: and then choose where the SPARQL query should be moved to: After the destination is set you would be redirected to the SPARQL query new page. The SPARQL query could be transferred from an account to an organization and vice versa.","title":"Transfer a SPARQL query"},{"location":"triply-db-getting-started/viewing-data/#copy-a-sparql-query","text":"Users can copy SPARQL queries to another account or an organization. To do that, click on three dots in the upper right corner of the query and choose the copy option: Then, choose where the SPARQL query should be moved to: After setting the destination, you will be redirected to the SPARQL query new page. The SPARQL query can be copied from an account to an organization and vice versa.","title":"Copy a SPARQL query"},{"location":"triply-db-getting-started/viewing-data/#elasticsearch","text":"When a dataset has a running Elasticsearch service, textual searches can be performed over the entire dataset. Text search with Elasticsearch works like a search engine and returns any node that contains your search term, or contains the search term in any of its properties. It is also possible to write a custom query using the Elasticsearch Query DSL (Domain Specific Language) .","title":"ElasticSearch"},{"location":"triply-db-getting-started/viewing-data/#graphql","text":"Some TriplyDB instances also support querying using the GraphQL language . For more information on the schema and possible queries, also read this document .","title":"GraphQL"},{"location":"triply-db-getting-started/viewing-data/#insights","text":"The insights page has been developed to give you a succinct overview of the linked data at hand. It holds two views: the class frequency and the class hierarchy view.","title":"Insights"},{"location":"triply-db-getting-started/viewing-data/#class-frequency","text":"The class frequency diagram shows how often classes and properties appear in a graph. The drop-down on the top of the visualization selects the graph for which the class frequency is drawn. The visualization shows the 10 most frequent classes in the selected graph. The exact number of occurrences can be seen when hovering over the bar of a class, also showing the complete IRI/prefixed IRI. When clicking on the bar of a class the node will expand and show the 10 most frequent predicates of that class.","title":"Class frequency"},{"location":"triply-db-getting-started/viewing-data/#class-hierarchy","text":"The class hierarchy diagram shows the hierarchy of the dataset in three different visualizations. Each of the diagrams are created by the rdfs:subClassOf relations and the classes in the dataset. TriplyDB has three different visualization methods for the classHierarchy: Bubbles visualization Treemap visualization Sunburst visualization All three visualizations are interactive in two ways. It is possible to hover over them, which will show information about the layer the mouse is on, or to click on them, so the visualization zooms in one or more layers. 
For each visualization it is also possible to zoom out: Bubbles visualization: click the outside of the bubble Treemap visualization: use the breadcrumbs trail shown above the visualization Sunburst visualization: click the innermost circle of the visualization","title":"Class hierarchy"},{"location":"triply-db-getting-started/viewing-data/#when-does-the-class-hierarchy-show","text":"A class only appears in the class hierarchy tab if it has instances (connected to the class via rdf:type ). The class hierarchy cannot be shown if it contains a cycle, meaning that some class is (indirectly) its own subclass.","title":"When does the class hierarchy show?"},{"location":"triply-etl/","text":"On this page: TriplyETL Overview Supported standards and formats Supported data formats Why TriplyETL? TriplyETL Overview \u00b6 TriplyETL allows you to create and maintain production-grade linked data pipelines. Getting Started explains how TriplyETL can be used for the first time. CLI explains the commands that are used to install, compile, and run TriplyETL pipelines. The Changelog documents the changes that are introduced in new TriplyETL version. Maintenance explains how TriplyETL can be updated and can be configured to run in automated pipelines. TriplyETL uses the following unique approach: graph LR sources -- 1. Extract --> record record -- 2. Transform --> record record -- 3. Assert --> ld ld -- 4. Enrich --> ld ld -- 5. Validate --> ld ld -- 6. Publish --> destinations destinations[(\"D. Destinations\\n(TriplyDB)\")] ld[C. Internal Store] record[B. Record] sources[A. Data Sources] This approach consists of the following six steps (see diagram): Step 1. Extract : extracts a stream of records from one or more data sources . Step 2. Transform : cleans, combines, and extends data in the record . Step 3. Assert : uses data from the record to make linked data assertions in the internal store . Step 4. Enrich : improves and extends linked data in the internal store . Step 5. Validate ensures that linked data in the internal store meets the specified quality criteria. Step 6. Publish : takes the linked data from the internal store , and publishes it to a destination such as TriplyDB . TriplyETL uses the following data storage stages, to connect the six steps in the approach (see diagram): Stage A. Sources : the data inputs to the pipeline. Stage B. Record : provides a uniform representation for data from any source system. Stage C. Internal Store : temporarily holds linked data generated in the pipeline. Stage D. Destinations : places where output from the pipeline is published to, for example TriplyDB . In addition, the following configuration tools are used throughout the six TriplyETL steps: Declarations : introduce constants are reuse throughout the TriplyETL configuration. Control structures : make parts of the TriplyETL configuration optional or repeating (loops). Debug functions : give insights into TriplyETL internals for the purpose of finding issues and performing maintenance. Supported standards and formats \u00b6 TriplyETL follows a multi-paradigm approach. This means that TriplyETL seeks to support a wide variety of data formats, configuration languages, and linked data standards. This allows users to most optimally combine the formats, languages, and standards that they wish to use. Other ETL approaches focus on one format/language/standard, which severely limits what users that use those approaches can do. 
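As a concrete illustration of how these steps and paradigms come together in a single configuration, here is a minimal, hypothetical pipeline sketch. It assumes the standard TriplyETL boilerplate (an Etl object whose use() method chains the middlewares, with Etl , fromJson , and toTriplyDb imported from '@triplyetl/etl/generic' , and the assertion functions imported from '@triplyetl/etl/ratt' ); the dataset name, record keys, and IRIs are made up for illustration only.
import { Etl, fromJson, toTriplyDb } from '@triplyetl/etl/generic'
import { iri, triple } from '@triplyetl/etl/ratt'

export default async function (): Promise<Etl> {
  const etl = new Etl()
  etl.use(
    // 1. Extract: read records from an inline JSON source.
    fromJson([{ id: 'https://example.com/id/123', name: 'Example' }]),
    // 3. Assert: add one triple to the internal store
    //    (the object string is implicitly cast to a literal).
    triple(iri('id'), 'https://example.com/def/name', 'name'),
    // 6. Publish: upload the internal store to a TriplyDB dataset.
    toTriplyDb({ dataset: 'example' }),
  )
  return etl
}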
Supported data formats \u00b6 TriplyETL supports the following data formats through its extractors : CSV (Comma-Separated Values) JSON (JavaScript Object Notation) OAI-PMH (Open Archives Initiative, Protocol for Metadata Harvesting) PostgreSQL (Postgres, SQL) RDF 1.1 (Resource Description Language) TSV (Tab-Separated Values) XLSX (Office Open XML Workbook, Microsoft Excel) XML 1.1 (Extensible Markup Language) TriplyETL implements the latest versions of the linked data standards and best practices: RDF 1.1, SHACL Core, SHACL Advanced, XML Schema Datatypes 1.1, IETF RFC3987 (IRIs), IETF RFC5646 (Language Tags), SPARQL 1.1 Query Languahge, SPARQL 1.1 Update, SPARQL 1.1 Federation, N-Triples 1.1, N-Quads 1.1, Turtle 1.1, TriG 1.1, RDF/XML 1.1, JSON-LD 1.1 (TBA), JSON-LD Framing (TBA), and JSON-LD Algorithms (TBA). Why TriplyETL? \u00b6 TriplyETL has the following core features, that set it apart from other data pipeline products: Backend-agnostic : TriplyETL supports a large number of data source formats and types. Source data is processed in a unified record. This decouples configuration from source format specific. In TriplyETL, changing the source system often only requires changing the extractor. Multi-paradigm : TriplyETL supports all major paradigms for transforming and asserting linked data: SPARQL, SHACL, RML, JSON-LD, XSLT, and RATT (RDF All The Things). You can also write your own transformations in TypeScript for optimal extensibility. Scalable : TriplyETL processes data in a stream of self-contained records. This allows TriplyETL pipelines to run in parallel, ensuring a high pipeline throughput. High Quality : The output of TriplyETL pipelines is automatically validated against the specified data model, and/or against a set of preconfigured 'gold records'. Production-grade : TriplyETL pipelines run in GitLab CI/CD, and support the four DTAP environments that are often used in production systems: Development, Testing, Acceptance, Production.","title":"Overview"},{"location":"triply-etl/#triplyetl-overview","text":"TriplyETL allows you to create and maintain production-grade linked data pipelines. Getting Started explains how TriplyETL can be used for the first time. CLI explains the commands that are used to install, compile, and run TriplyETL pipelines. The Changelog documents the changes that are introduced in new TriplyETL version. Maintenance explains how TriplyETL can be updated and can be configured to run in automated pipelines. TriplyETL uses the following unique approach: graph LR sources -- 1. Extract --> record record -- 2. Transform --> record record -- 3. Assert --> ld ld -- 4. Enrich --> ld ld -- 5. Validate --> ld ld -- 6. Publish --> destinations destinations[(\"D. Destinations\\n(TriplyDB)\")] ld[C. Internal Store] record[B. Record] sources[A. Data Sources] This approach consists of the following six steps (see diagram): Step 1. Extract : extracts a stream of records from one or more data sources . Step 2. Transform : cleans, combines, and extends data in the record . Step 3. Assert : uses data from the record to make linked data assertions in the internal store . Step 4. Enrich : improves and extends linked data in the internal store . Step 5. Validate ensures that linked data in the internal store meets the specified quality criteria. Step 6. Publish : takes the linked data from the internal store , and publishes it to a destination such as TriplyDB . TriplyETL uses the following data storage stages, to connect the six steps in the approach (see diagram): Stage A. 
Sources : the data inputs to the pipeline. Stage B. Record : provides a uniform representation for data from any source system. Stage C. Internal Store : temporarily holds linked data generated in the pipeline. Stage D. Destinations : places where output from the pipeline is published to, for example TriplyDB . In addition, the following configuration tools are used throughout the six TriplyETL steps: Declarations : introduce constants are reuse throughout the TriplyETL configuration. Control structures : make parts of the TriplyETL configuration optional or repeating (loops). Debug functions : give insights into TriplyETL internals for the purpose of finding issues and performing maintenance.","title":"TriplyETL Overview"},{"location":"triply-etl/#supported-standards-and-formats","text":"TriplyETL follows a multi-paradigm approach. This means that TriplyETL seeks to support a wide variety of data formats, configuration languages, and linked data standards. This allows users to most optimally combine the formats, languages, and standards that they wish to use. Other ETL approaches focus on one format/language/standard, which severely limits what users that use those approaches can do.","title":"Supported standards and formats"},{"location":"triply-etl/#supported-data-formats","text":"TriplyETL supports the following data formats through its extractors : CSV (Comma-Separated Values) JSON (JavaScript Object Notation) OAI-PMH (Open Archives Initiative, Protocol for Metadata Harvesting) PostgreSQL (Postgres, SQL) RDF 1.1 (Resource Description Language) TSV (Tab-Separated Values) XLSX (Office Open XML Workbook, Microsoft Excel) XML 1.1 (Extensible Markup Language) TriplyETL implements the latest versions of the linked data standards and best practices: RDF 1.1, SHACL Core, SHACL Advanced, XML Schema Datatypes 1.1, IETF RFC3987 (IRIs), IETF RFC5646 (Language Tags), SPARQL 1.1 Query Languahge, SPARQL 1.1 Update, SPARQL 1.1 Federation, N-Triples 1.1, N-Quads 1.1, Turtle 1.1, TriG 1.1, RDF/XML 1.1, JSON-LD 1.1 (TBA), JSON-LD Framing (TBA), and JSON-LD Algorithms (TBA).","title":"Supported data formats"},{"location":"triply-etl/#why-triplyetl","text":"TriplyETL has the following core features, that set it apart from other data pipeline products: Backend-agnostic : TriplyETL supports a large number of data source formats and types. Source data is processed in a unified record. This decouples configuration from source format specific. In TriplyETL, changing the source system often only requires changing the extractor. Multi-paradigm : TriplyETL supports all major paradigms for transforming and asserting linked data: SPARQL, SHACL, RML, JSON-LD, XSLT, and RATT (RDF All The Things). You can also write your own transformations in TypeScript for optimal extensibility. Scalable : TriplyETL processes data in a stream of self-contained records. This allows TriplyETL pipelines to run in parallel, ensuring a high pipeline throughput. High Quality : The output of TriplyETL pipelines is automatically validated against the specified data model, and/or against a set of preconfigured 'gold records'. Production-grade : TriplyETL pipelines run in GitLab CI/CD, and support the four DTAP environments that are often used in production systems: Development, Testing, Acceptance, Production.","title":"Why TriplyETL?"},{"location":"triply-etl/assert/","text":"On this page: Assert Next steps Assert \u00b6 The Assert step uses data from the Record to add linked data to the Internal Store. graph LR sources -- 1. 
Extract --> record record -- 2. Transform --> record record -- 3. Assert --> ld ld -- 4. Enrich --> ld ld -- 5. Validate --> ld ld -- 6. Publish --> destinations linkStyle 2 stroke:red,stroke-width:3px; destinations[(\"D. Destinations\\n(TriplyDB)\")] ld[C. Internal Store] record[B. Record] sources[A. Data Sources] Assertions are statements of fact. In linked data, assertions are commonly called 'triples' or 'quads'. A triple is composed of three parts: a subject term, a predicate term, and an object term. A quad or quadruple also has a fourth graph term. TriplyETL supports the following languages for making linked data assertions: JSON-LD can be used to assert data according to a JSON-LD Context. RATT contains a core set of TypeScript functions for making linked data assertions: Term Assertions : functions that are used to assert terms (IRIs or literals). Statement Assertions : functions that are used to assert statements (triples or quads). RML inserts the data that has been transformed (from a non-RDF format into RDF triples) into the store. XSLT inserts the data that has been transformed (from XML to XML or RDF) using the stylesheet parameter of the loadRdf() function into the store. Next steps \u00b6 After linked data has been asserted into the internal store, the following steps can be performed: Step 4. Enrich : improves and extends linked data in the internal store. Step 5. Validate ensures that linked data in the internal store meets the specified quality criteria. Step 6. Publish : takes the linked data from the internal store, and publishes it to a destination such as TriplyDB .","title":"Overview"},{"location":"triply-etl/assert/#assert","text":"The Assert step uses data from the Record to add linked data to the Internal Store. graph LR sources -- 1. Extract --> record record -- 2. Transform --> record record -- 3. Assert --> ld ld -- 4. Enrich --> ld ld -- 5. Validate --> ld ld -- 6. Publish --> destinations linkStyle 2 stroke:red,stroke-width:3px; destinations[(\"D. Destinations\\n(TriplyDB)\")] ld[C. Internal Store] record[B. Record] sources[A. Data Sources] Assertions are statements of fact. In linked data, assertions are commonly called 'triples' or 'quads'. A triple is composed of three parts: a subject term, a predicate term, and an object term. A quad or quadruple also has a fourth graph term. TriplyETL supports the following languages for making linked data assertions: JSON-LD can be used to assert data according to a JSON-LD Context. RATT contains a core set of TypeScript functions for making linked data assertions: Term Assertions : functions that are used to assert terms (IRIs or literals). Statement Assertions : functions that are used to assert statements (triples or quads). RML inserts the data that has been transformed (from a non-RDF format into RDF triples) into the store. XSLT inserts the data that has been transformed (from XML to XML or RDF) using the stylesheet parameter of the loadRdf() function into the store.","title":"Assert"},{"location":"triply-etl/assert/#next-steps","text":"After linked data has been asserted into the internal store, the following steps can be performed: Step 4. Enrich : improves and extends linked data in the internal store. Step 5. Validate ensures that linked data in the internal store meets the specified quality criteria. Step 6.
Publish : takes the linked data from the internal store, and publishes it to a destination such as TriplyDB .","title":"Next steps"},{"location":"triply-etl/assert/json-ld/","text":"On this page: JSON-LD Assert JSON-LD Assert \u00b6 The JSON-LD standard includes the following algorithms that allow linked data to be added to the internal store: The Expansion algorithm allows a JSON-LD context to be applied to the record. The Deserialization algorithm allows linked data to be generated based on the expanded record.","title":"JSON-LD"},{"location":"triply-etl/assert/json-ld/#json-ld-assert","text":"The JSON-LD standard includes the following algorithms that allow linked data to be added to the internal store: The Expansion algorithm allows a JSON-LD context to be applied to the record. The Deserialization algorithm allows linked data to be generated based on the expanded record.","title":"JSON-LD Assert"},{"location":"triply-etl/assert/rml/","text":"On this page: RML Assert RML Assert \u00b6","title":"RML"},{"location":"triply-etl/assert/rml/#rml-assert","text":"","title":"RML Assert"},{"location":"triply-etl/assert/xslt/","text":"On this page: XSLT Assert XSLT Assert \u00b6","title":"XSLT"},{"location":"triply-etl/assert/xslt/#xslt-assert","text":"","title":"XSLT Assert"},{"location":"triply-etl/assert/ratt/statements/","text":"On this page: RATT Statement Assertion nestedPairs() Signature Parameters Example: Unit of measure Example: Geometry Maintenance impact Relation to standards objects() Signature Parameters Example: Alternative labels Maintenance impact Relation to standards pairs() Signature Parameters Example: Alternative and preferred label Maintenance impact Relation to standards quad() Signature Parameters Example: Data and metadata See also quads() Signature Parameters Example: Data and metadata See also triple() Signature Parameters Example: 1 Example: 2 Example: 3 triples() Signature Parameters Example Maintenance impact Relation to standards Implicit casts Relation to standards RATT Statement Assertion \u00b6 This page documents the functions that make linked data statement assertions (triples and quads). The statement assertion functions are imported as follows: import { nestedPairs, objects, pairs, quad, quads, triple, triples } from '@triplyetl/etl/ratt' nestedPairs() \u00b6 Creates a nested node and makes multiple assertions about that node. Since linked data is composed of triples, more complex n-ary information must often be asserted by using a nested node. Since they must appear in both the subject and object term position, nested nodes are required to be IRIs. Signature \u00b6 This function has the following signature: nestedPairs(subject, predicate, ...pairs) // [1] nestedPairs(subject, predicate, nestedNode, ...pairs) // [2] nestedPairs(graph, subject, predicate, nestedNode, ...pairs) // [3] When Signature 1 is used, the nested node is automatically generated by TriplyETL. This automatically generated nested node is a Skolem IRI (see the documentation on Skolem IRIs for more information). When Signature 2 is used, the nested node must be specified by the user. This allows the nested node to be created in a more structured way, for example by using iri() , addIri() , or addHashedIri() . When Signature 3 is used, a graph name must be specified, resulting in quad statements. Parameters \u00b6 graph is a graph term; this must be an IRI. subject is a subject term; this must be an IRI. predicate is a predicate term; this must be an IRI. 
nestedNode is the nested node; this must be an IRI. pairs is one or more pairs that make assertions about the nested node. Every pair consists of a predicate term and an object term (in that order). Example: Unit of measure \u00b6 The following example asserts a value together with a unit of measure. Since Signature 1 is used, a Skolem IRI is used as the nested node. fromJson([{ id: '1', height: 15 }]), nestedPairs(iri(prefix.product, 'id'), sdo.height, [qudt.unit, unit.CentiM], [rdf.value, 'height'], ), This makes the following linked data assertions: product:1 sdo:height [ qudt:unit unit:CentiM; rdf:value 15 ]. Or diagrammatically: graph LR product -- sdo:height --> skolem skolem -- qudt:unit --> centim skolem -- rdf:value --> 15 product[product:1]:::data skolem[_:1]:::data centim[unit:CentiM]:::model 15:::data classDef data fill:yellow classDef model fill:lightblue classDef meta fill:sandybrown Example: Geometry \u00b6 The following example asserts a GeoSPARQL geometry. The geometry is created as a separate node. fromJson([{ id: '1', geometry: 'Point(1.1 2.2)' }]), nestedPairs(iri(prefix.feature, 'id'), geo.hasGeometry, iri(prefix.geometry, 'id'), [a, geo.Geometry], [geo.asWKT, literal('geometry', geo.wktLiteral)], ), This generates the following linked data: feature:1 geo:hasGeometry geometry:1. geometry:1 a geo:Geometry; geo:asWKT 'Point(1.1 2.2)'^^geo:wktLiteral. Or diagrammatically: graph LR feature -- geo:hasGeometry --> geometry geometry -- a --> Geometry geometry -- geo:asWKT --> wkt feature[feature:1]:::data geometry[geometry:1]:::data Geometry[geo:Geometry]:::model wkt[\"'Point(1.1 2.2)'^^geo:wktLiteral\"]:::data classDef data fill:yellow classDef model fill:lightblue classDef meta fill:sandybrown Maintenance impact \u00b6 Every use of nestedPairs() can be replaced by multiple uses of other assertion functions, like triple() , quad() , pairs() , and addSkolemIri() . For example, when the geometry example is rewritten to not use nestedPairs() , the nested node must be specified twice, which is a maintenance burden: fromJson([{ id: '1', geometry: 'Point(1.1 2.2)' }]), triple(iri(prefix.feature, 'id'), geo.hasGeometry, iri(prefix.geometry, 'id')), pairs(iri(prefix.geometry, 'id'), [a, geo.Geometry], [geo.asWKT, literal('geometry', geo.wktLiteral)], ), Notice that the use of nestedPairs() results in configuration that is shorter and easier to maintain. For example, nestedPairs() does not need to repeat the specification of the nested node iri(prefix.geometry, 'id') . Relation to standards \u00b6 The functionality of nestedPairs() is similar to anonymous node notation and predicate list notation in the linked data standards TriG, Turtle, and SPARQL. Notice that the following notation in these standards: feature:id geo:hasGeometry [ a geo:Geometry; geo:asWKT 'Point(1.1 2.2)'^^geo:wktLiteral ]. is structurally similar to the following code snippet that uses nestedPairs() : nestedPairs(iri(prefix.feature, 'id'), geo.hasGeometry, [a, geo.Geometry], [geo.asWKT, literal('geometry', geo.wktLiteral)], ), objects() \u00b6 Asserts multiple triples that share the same subject and predicate term. Signature \u00b6 This function has the following signature: objects(subject, predicate, ...objects) Parameters \u00b6 subject is a subject term; this must be an IRI. predicate is a predicate term; this must be an IRI. objects is one or more object terms; these must be IRIs and/or literals. 
Example: Alternative labels \u00b6 The following snippet asserts multiple alternative labels for a city: fromJson([{ name: 'Ney York', alt1: 'The Big Apple', alt2: 'The Capital of the World', alt3: 'The City of Dreams', alt4: 'The City That Never Sleeps', }]), objects(iri(prefix.city, 'name'), skos.altLabel, 'alt1', 'alt2', 'alt3', 'alt4', ), This results in the following 4 linked data assertions: city:New%20York skos:altLabel 'The Big Apple'@en. 'The Capital of the World'@en, 'The City of Dreams'@en, 'The City That Never Sleeps'@en. Or diagrammatically: graph LR newYork -- skos:altLabel --> a & b & c & d newYork[city:New%20York]:::data a[\"'The Big Apple'@en\"]:::data b[\"'The Capital of the World'@en\"]:::data c[\"'The City of Dreams'@en\"]:::data d[\"'The City That Never Sleeps'@en\"]:::data classDef data fill:yellow Maintenance impact \u00b6 Every use of objects() can be replaced by multiple uses of the triple() assertion function. However, doing so requires the subject and predicate terms to be repeated for each use of triple() . This is why the use of objects() results in configuration that is shorter and easier to maintain. With the release of 4.0.0 version, it is no longer allowed to have less than 2 objects, otherwise refer to transformation triple() . Relation to standards \u00b6 The functionality of objects() is similar to predicate-object list notation in the linked data standards TriG, Turtle, and SPARQL. Notice that the following notation in these standards: city:New%20York skos:altLabel 'The Big Apple', 'The Capital of the World', 'The City of Dreams', 'The City That Never Sleeps'. is structurally similar to the following code snippet that uses objects() : objects(iri(prefix.city, 'name'), skos.altLabel, 'alt1', 'alt2', 'alt3', 'alt4', ), pairs() \u00b6 Asserts multiple triples that share the same subject term. Signature \u00b6 This function has the following signature: pairs(subject, ...pairs) // [1] pairs(graph, subject, ...pairs) // [2] Signature 1 asserts triples, while Signature 2 asserts quads. Parameters \u00b6 graph is a graph term; this must be an IRI subject is a subject term; this must be an IRI. pairs is one or more pairs that make assertions about the subject term. Every pair consists of a predicate term and an object term (in that order). Example: Alternative and preferred label \u00b6 The following snippet asserts a preferred label and an alternative label for cities: fromJson([ { name: 'London', alt: 'Home of the Big Ben' }, { name: 'Ney York', alt: 'The Big Apple' }, ]), pairs(iri(prefix.city, 'name'), [skos.prefLabel, literal('name', lang.en)], [skos.altLabel, literal('alt', lang.en)], ), This results in the following 4 linked data assertions: city:London skos:prefLabel 'London'@en; skos:altLabel 'Home of the Big Ben'@en. city:New%20York skos:prefLabel 'New York'@en; skos:altLabel 'The Big Apple'@en. Or diagrammatically: graph LR london -- skos:altLabel --> a london -- skos:prefLabel --> b newYork -- skos:altLabel --> c newYork -- skos:prefLabel --> d london[city:London]:::data newYork[city:New%20York]:::data a[\"'Home of the Big Ben'@en\"]:::data b[\"'London'@en\"]:::data c[\"'The Big Apple'@en\"]:::data d[\"'New York'@en\"]:::data classDef data fill:yellow Maintenance impact \u00b6 This function provides a shorthand notation for assertions that can also be made with multiple uses of assertion triple() . Relation to standards \u00b6 The notational convenience of this middleware is similar to predicate lists in TriG, Turtle, and SPARQL. 
Notice that the following notation in these standards: city:New%20York skos:prefLabel 'New York'@en; skos:altLabel 'The Big Apple'@en. is structurally similar to the following code snippet that uses pairs() : pairs(iri(prefix.city, 'name'), [skos.prefLabel, literal('name', lang.en)], [skos.altLabel, literal('alt', lang.en)], ), quad() \u00b6 Asserts a linked data statement that consists of four terms: subject, predicate, object, and graph (in that order). A quadruple or 'quad' is a triple to which a graph name is added. Signature \u00b6 This function has the following signature: quad(subject, predicate, object, graph) Parameters \u00b6 subject is a subject term; this must be an IRI. predicate is a predicate term; this must be an IRI. object is an object term; this must be an IRI or literal. graph is a graph term; this must be an IRI. Example: Data and metadata \u00b6 A dataset may distinguish between data statements and metadata statements. Such a distinction can be implemented by placing statements into different graphs. The following code snippet makes one statements assertion in a metadata graph and one statement assertion in a data graph: quad(iri(prefix.dataset, 'flowers'), a, dcat.Dataset, graph.metadata), quad(iri(prefix.flower, '_id'), a, def.Flower, graph.data), See also \u00b6 Use function quads() to make multiple quad assertions. quads() \u00b6 Asserts multiple linked data statements that consists of four terms: subject, predicate, object, and graph (in that order). A quadruple or 'quad' is a triple to which a graph name is added. Signature \u00b6 This function has the following signature: quads(...quads) Parameters \u00b6 quads is one or more quads, represented by arrays that contain four terms: subject, predicate, object, and graph (in that order). Example: Data and metadata \u00b6 An ETL can distinguish between data and metadata assertions. Both may be placed into distinct graphs. The following snippet makes assertions in a metadata graph and assertions in a data graph. quads( [iri(prefix.dataset, 'flowers'), a, dcat.Dataset, graph.metadata], ..., ), quads( [iri(prefix.flower, '_id'), a, def.Flower, graph.data], ..., ), See also \u00b6 Use function quad() for asserting a single quad. triple() \u00b6 Asserts a linked data statement that consists of three terms: subject, predicate, and object (in that order). A triple asserts a factual statement, claiming that the thing denoted by the subject term and the thing denotes by the object term are related to one another according to the relationship denoted by the predicate term. A triple is the smallest unit of meaning in linked data. Signature \u00b6 This function has the following signature: triple(subject, predicate, object) Parameters \u00b6 subject is a subject term; this must be an IRI. predicate is a predicate term; this must be an IRI. object is an object term; this must be an IRI or literal. Example: 1 \u00b6 The following triple asserts that someone is a person: triple(iri(prefix.person, 'id'), a, foaf.Person), The subject term is an IRI that is constructed from an IRI prefix ( prefix.person ) and a key that contains the IRI local name ( 'id' ). The predicate and object terms are IRIs that are imported from external vocabularies . Example: 2 \u00b6 The following triple asserts that someone has an age that is derived from the 'age' key in the record: triple('_person', foaf.age, literal('age', xsd.nonNegativeInteger)), The subject term is an IRI that is stored in the '_person' key of the record. 
This term was created previously, for example by using the addIri() function. The predicate term is imported from an external vocabulary . The object term is a typed literal that is constructed from a key ( 'age' ) that contains the lexical form, and a datatype IRI that is imported from an external vocabulary. Example: 3 \u00b6 The following triple uses three static IRIs: triple(Iri('https://example.com/id/123'), a, sdo.Product), triples() \u00b6 Asserts multiple linked data statements that consist of three terms: subject, predicate, and object (in that order). Signature \u00b6 This function has the following signature: triples(graph, ...triples) Parameters \u00b6 graph is a graph term; this must be an IRI. triples is one or more triples, represented by arrays that contain three terms: subject, predicate, and object (in that order). Example \u00b6 Suppose that we want to distinguish between data and metadata assertions. We can do so by asserting them in distinct graphs. The following makes multiple metadata assertions in the metadata graph, followed by multiple data assertions in the data graph. triples(graph.metadata, [iri(prefix.dataset, str('flowers')), a, dcat.Dataset], ... ), triples(graph.data, [iri(prefix.flower, '_id'), a, def.Flower], ... ), Maintenance impact \u00b6 It is common for multiple statements to occur in the same graph. In such cases, it is possible to use the quad() function multiple times, but this requires repeating the graph term. This is why the use of triples() results in configuration that is shorter and easier to maintain. Relation to standards \u00b6 The functionality of triples() is conceptually similar to graph notation in the linked data standard TriG. Notice that the following notation in TriG: graph:metadata { dataset:flowers a dcat:Dataset. ... } graph:data { flower:123 a def:Flower. ... } is structurally similar to the following code snippet that uses triples() : triples(graph.metadata, [iri(prefix.dataset, str('flowers')), a, dcat.Dataset], ... ), triples(graph.data, [iri(prefix.flower, '_id'), a, def.Flower], ... ), Implicit casts \u00b6 The statement assertion functions use implicit casting from strings to IRIs or literals. The rules for this are as follows: If a string value that encodes a valid IRI is specified in the subject, predicate, or graph position, that string is implicitly cast to an IRI. If a string value is specified in the object position, that string is implicitly cast to a literal. The following code snippet uses implicit casts for all four terms in the quad() assertion: fromJson([{ url: 'https://example.com/123' }]), quad('url', 'url', 'url', 'url'), This results in the following linked data: <https://example.com/123> { <https://example.com/123> <https://example.com/123> 'https://example.com/123'. } Notice that the code snippet can be rewritten to make use of explicit casts: fromJson([{ url: 'https://example.com/123' }]), quad(iri('url'), iri('url'), literal('url'), iri('url')), Relation to standards \u00b6 The functionality of implicit casts for literals is conceptually similar to shorthand notation for xsd:string literals in the linked data standards Turtle, TriG, and SPARQL. Notice that the following notation in Turtle: city:amsterdam dct:identifier '0200'. is structurally similar to the following code snippet that uses an implicit cast for the string literal: triple('_city', dct.identifier, 'id'),","title":"RATT Statements"},{"location":"triply-etl/assert/ratt/statements/#ratt-statement-assertion","text":"This page documents the functions that make linked data statement assertions (triples and quads).
The statement assertion functions are imported as follows: import { nestedPairs, objects, pairs, quad, quads, triple, triples } from '@triplyetl/etl/ratt'","title":"RATT Statement Assertion"},{"location":"triply-etl/assert/ratt/statements/#nestedpairs","text":"Creates a nested node and makes multiple assertions about that node. Since linked data is composed of triples, more complex n-ary information must often be asserted by using a nested node. Since they must appear in both the subject and object term position, nested nodes are required to be IRIs.","title":"nestedPairs()"},{"location":"triply-etl/assert/ratt/statements/#signature","text":"This function has the following signature: nestedPairs(subject, predicate, ...pairs) // [1] nestedPairs(subject, predicate, nestedNode, ...pairs) // [2] nestedPairs(graph, subject, predicate, nestedNode, ...pairs) // [3] When Signature 1 is used, the nested node is automatically generated by TriplyETL. This automatically generated nested node is a Skolem IRI (see the documentation on Skolem IRIs for more information). When Signature 2 is used, the nested node must be specified by the user. This allows the nested node to be created in a more structured way, for example by using iri() , addIri() , or addHashedIri() . When Signature 3 is used, a graph name must be specified, resulting in quad statements.","title":"Signature"},{"location":"triply-etl/assert/ratt/statements/#parameters","text":"graph is a graph term; this must be an IRI. subject is a subject term; this must be an IRI. predicate is a predicate term; this must be an IRI. nestedNode is the nested node; this must be an IRI. pairs is one or more pairs that make assertions about the nested node. Every pair consists of a predicate term and an object term (in that order).","title":"Parameters"},{"location":"triply-etl/assert/ratt/statements/#example-unit-of-measure","text":"The following example asserts a value together with a unit of measure. Since Signature 1 is used, a Skolem IRI is used as the nested node. fromJson([{ id: '1', height: 15 }]), nestedPairs(iri(prefix.product, 'id'), sdo.height, [qudt.unit, unit.CentiM], [rdf.value, 'height'], ), This makes the following linked data assertions: product:1 sdo:height [ qudt:unit unit:CentiM; rdf:value 15 ]. Or diagrammatically: graph LR product -- sdo:height --> skolem skolem -- qudt:unit --> centim skolem -- rdf:value --> 15 product[product:1]:::data skolem[_:1]:::data centim[unit:CentiM]:::model 15:::data classDef data fill:yellow classDef model fill:lightblue classDef meta fill:sandybrown","title":"Example: Unit of measure"},{"location":"triply-etl/assert/ratt/statements/#example-geometry","text":"The following example asserts a GeoSPARQL geometry. The geometry is created as a separate node. fromJson([{ id: '1', geometry: 'Point(1.1 2.2)' }]), nestedPairs(iri(prefix.feature, 'id'), geo.hasGeometry, iri(prefix.geometry, 'id'), [a, geo.Geometry], [geo.asWKT, literal('geometry', geo.wktLiteral)], ), This generates the following linked data: feature:1 geo:hasGeometry geometry:1. geometry:1 a geo:Geometry; geo:asWKT 'Point(1.1 2.2)'^^geo:wktLiteral. 
Or diagrammatically: graph LR feature -- geo:hasGeometry --> geometry geometry -- a --> Geometry geometry -- geo:asWKT --> wkt feature[feature:1]:::data geometry[geometry:1]:::data Geometry[geo:Geometry]:::model wkt[\"'Point(1.1 2.2)'^^geo:wktLiteral\"]:::data classDef data fill:yellow classDef model fill:lightblue classDef meta fill:sandybrown","title":"Example: Geometry"},{"location":"triply-etl/assert/ratt/statements/#maintenance-impact","text":"Every use of nestedPairs() can be replaced by multiple uses of other assertion functions, like triple() , quad() , pairs() , and addSkolemIri() . For example, when the geometry example is rewritten to not use nestedPairs() , the nested node must be specified twice, which is a maintenance burden: fromJson([{ id: '1', geometry: 'Point(1.1 2.2)' }]), triple(iri(prefix.feature, 'id'), geo.hasGeometry, iri(prefix.geometry, 'id')), pairs(iri(prefix.geometry, 'id'), [a, geo.Geometry], [geo.asWKT, literal('geometry', geo.wktLiteral)], ), Notice that the use of nestedPairs() results in configuration that is shorter and easier to maintain. For example, nestedPairs() does not need to repeat the specification of the nested node iri(prefix.geometry, 'id') .","title":"Maintenance impact"},{"location":"triply-etl/assert/ratt/statements/#relation-to-standards","text":"The functionality of nestedPairs() is similar to anonymous node notation and predicate list notation in the linked data standards TriG, Turtle, and SPARQL. Notice that the following notation in these standards: feature:id geo:hasGeometry [ a geo:Geometry; geo:asWKT 'Point(1.1 2.2)'^^geo:wktLiteral ]. is structurally similar to the following code snippet that uses nestedPairs() : nestedPairs(iri(prefix.feature, 'id'), geo.hasGeometry, [a, geo.Geometry], [geo.asWKT, literal('geometry', geo.wktLiteral)], ),","title":"Relation to standards"},{"location":"triply-etl/assert/ratt/statements/#objects","text":"Asserts multiple triples that share the same subject and predicate term.","title":"objects()"},{"location":"triply-etl/assert/ratt/statements/#signature_1","text":"This function has the following signature: objects(subject, predicate, ...objects)","title":"Signature"},{"location":"triply-etl/assert/ratt/statements/#parameters_1","text":"subject is a subject term; this must be an IRI. predicate is a predicate term; this must be an IRI. objects is one or more object terms; these must be IRIs and/or literals.","title":"Parameters"},{"location":"triply-etl/assert/ratt/statements/#example-alternative-labels","text":"The following snippet asserts multiple alternative labels for a city: fromJson([{ name: 'Ney York', alt1: 'The Big Apple', alt2: 'The Capital of the World', alt3: 'The City of Dreams', alt4: 'The City That Never Sleeps', }]), objects(iri(prefix.city, 'name'), skos.altLabel, 'alt1', 'alt2', 'alt3', 'alt4', ), This results in the following 4 linked data assertions: city:New%20York skos:altLabel 'The Big Apple'@en. 'The Capital of the World'@en, 'The City of Dreams'@en, 'The City That Never Sleeps'@en. Or diagrammatically: graph LR newYork -- skos:altLabel --> a & b & c & d newYork[city:New%20York]:::data a[\"'The Big Apple'@en\"]:::data b[\"'The Capital of the World'@en\"]:::data c[\"'The City of Dreams'@en\"]:::data d[\"'The City That Never Sleeps'@en\"]:::data classDef data fill:yellow","title":"Example: Alternative labels"},{"location":"triply-etl/assert/ratt/statements/#maintenance-impact_1","text":"Every use of objects() can be replaced by multiple uses of the triple() assertion function. 
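To make this concrete, the alternative-labels example above can be rewritten with triple() as in the following sketch (prefix.city and the record keys are taken from that example):

```ts
// Equivalent of the objects() example above, written out with triple().
triple(iri(prefix.city, 'name'), skos.altLabel, 'alt1'),
triple(iri(prefix.city, 'name'), skos.altLabel, 'alt2'),
triple(iri(prefix.city, 'name'), skos.altLabel, 'alt3'),
triple(iri(prefix.city, 'name'), skos.altLabel, 'alt4'),
```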
However, doing so requires the subject and predicate terms to be repeated for each use of triple() . This is why the use of objects() results in configuration that is shorter and easier to maintain. Since version 4.0.0, objects() must be called with at least 2 objects; to assert a single object, use the triple() assertion instead.","title":"Maintenance impact"},{"location":"triply-etl/assert/ratt/statements/#relation-to-standards_1","text":"The functionality of objects() is similar to predicate-object list notation in the linked data standards TriG, Turtle, and SPARQL. Notice that the following notation in these standards: city:New%20York skos:altLabel 'The Big Apple', 'The Capital of the World', 'The City of Dreams', 'The City That Never Sleeps'. is structurally similar to the following code snippet that uses objects() : objects(iri(prefix.city, 'name'), skos.altLabel, 'alt1', 'alt2', 'alt3', 'alt4', ),","title":"Relation to standards"},{"location":"triply-etl/assert/ratt/statements/#pairs","text":"Asserts multiple triples that share the same subject term.","title":"pairs()"},{"location":"triply-etl/assert/ratt/statements/#signature_2","text":"This function has the following signature: pairs(subject, ...pairs) // [1] pairs(graph, subject, ...pairs) // [2] Signature 1 asserts triples, while Signature 2 asserts quads.","title":"Signature"},{"location":"triply-etl/assert/ratt/statements/#parameters_2","text":"graph is a graph term; this must be an IRI. subject is a subject term; this must be an IRI. pairs is one or more pairs that make assertions about the subject term. Every pair consists of a predicate term and an object term (in that order).","title":"Parameters"},{"location":"triply-etl/assert/ratt/statements/#example-alternative-and-preferred-label","text":"The following snippet asserts a preferred label and an alternative label for cities: fromJson([ { name: 'London', alt: 'Home of the Big Ben' }, { name: 'New York', alt: 'The Big Apple' }, ]), pairs(iri(prefix.city, 'name'), [skos.prefLabel, literal('name', lang.en)], [skos.altLabel, literal('alt', lang.en)], ), This results in the following 4 linked data assertions: city:London skos:prefLabel 'London'@en; skos:altLabel 'Home of the Big Ben'@en. city:New%20York skos:prefLabel 'New York'@en; skos:altLabel 'The Big Apple'@en. Or diagrammatically: graph LR london -- skos:altLabel --> a london -- skos:prefLabel --> b newYork -- skos:altLabel --> c newYork -- skos:prefLabel --> d london[city:London]:::data newYork[city:New%20York]:::data a[\"'Home of the Big Ben'@en\"]:::data b[\"'London'@en\"]:::data c[\"'The Big Apple'@en\"]:::data d[\"'New York'@en\"]:::data classDef data fill:yellow","title":"Example: Alternative and preferred label"},{"location":"triply-etl/assert/ratt/statements/#maintenance-impact_2","text":"This function provides a shorthand notation for assertions that can also be made with multiple uses of the triple() assertion function.","title":"Maintenance impact"},{"location":"triply-etl/assert/ratt/statements/#relation-to-standards_2","text":"The notational convenience of this middleware is similar to predicate lists in TriG, Turtle, and SPARQL. Notice that the following notation in these standards: city:New%20York skos:prefLabel 'New York'@en; skos:altLabel 'The Big Apple'@en.
is structurally similar to the following code snippet that uses pairs() : pairs(iri(prefix.city, 'name'), [skos.prefLabel, literal('name', lang.en)], [skos.altLabel, literal('alt', lang.en)], ),","title":"Relation to standards"},{"location":"triply-etl/assert/ratt/statements/#quad","text":"Asserts a linked data statement that consists of four terms: subject, predicate, object, and graph (in that order). A quadruple or 'quad' is a triple to which a graph name is added.","title":"quad()"},{"location":"triply-etl/assert/ratt/statements/#signature_3","text":"This function has the following signature: quad(subject, predicate, object, graph)","title":"Signature"},{"location":"triply-etl/assert/ratt/statements/#parameters_3","text":"subject is a subject term; this must be an IRI. predicate is a predicate term; this must be an IRI. object is an object term; this must be an IRI or literal. graph is a graph term; this must be an IRI.","title":"Parameters"},{"location":"triply-etl/assert/ratt/statements/#example-data-and-metadata","text":"A dataset may distinguish between data statements and metadata statements. Such a distinction can be implemented by placing statements into different graphs. The following code snippet makes one statement assertion in a metadata graph and one statement assertion in a data graph: quad(iri(prefix.dataset, 'flowers'), a, dcat.Dataset, graph.metadata), quad(iri(prefix.flower, '_id'), a, def.Flower, graph.data),","title":"Example: Data and metadata"},{"location":"triply-etl/assert/ratt/statements/#see-also","text":"Use function quads() to make multiple quad assertions.","title":"See also"},{"location":"triply-etl/assert/ratt/statements/#quads","text":"Asserts multiple linked data statements that consist of four terms: subject, predicate, object, and graph (in that order). A quadruple or 'quad' is a triple to which a graph name is added.","title":"quads()"},{"location":"triply-etl/assert/ratt/statements/#signature_4","text":"This function has the following signature: quads(...quads)","title":"Signature"},{"location":"triply-etl/assert/ratt/statements/#parameters_4","text":"quads is one or more quads, represented by arrays that contain four terms: subject, predicate, object, and graph (in that order).","title":"Parameters"},{"location":"triply-etl/assert/ratt/statements/#example-data-and-metadata_1","text":"An ETL can distinguish between data and metadata assertions. Both may be placed into distinct graphs. The following snippet makes assertions in a metadata graph and assertions in a data graph. quads( [iri(prefix.dataset, 'flowers'), a, dcat.Dataset, graph.metadata], ..., ), quads( [iri(prefix.flower, '_id'), a, def.Flower, graph.data], ..., ),","title":"Example: Data and metadata"},{"location":"triply-etl/assert/ratt/statements/#see-also_1","text":"Use function quad() for asserting a single quad.","title":"See also"},{"location":"triply-etl/assert/ratt/statements/#triple","text":"Asserts a linked data statement that consists of three terms: subject, predicate, and object (in that order). A triple asserts a factual statement, claiming that the thing denoted by the subject term and the thing denoted by the object term are related to one another according to the relationship denoted by the predicate term.
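A triple is thus a quad without a graph term. As a sketch, the same statement can be asserted into the default graph with triple() or into a named graph with quad(); the prefix and graph constants are carried over from the data-and-metadata example above:

```ts
// The same statement, asserted into the default graph with triple() ...
triple(iri(prefix.dataset, str('flowers')), a, dcat.Dataset),
// ... and into a named graph with quad().
quad(iri(prefix.dataset, str('flowers')), a, dcat.Dataset, graph.metadata),
```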
A triple is the smallest unit of meaning in linked data.","title":"triple()"},{"location":"triply-etl/assert/ratt/statements/#signature_5","text":"This function has the following signature: triple(subject, predicate, object)","title":"Signature"},{"location":"triply-etl/assert/ratt/statements/#parameters_5","text":"subject is a subject term; this must be an IRI. predicate is a predicate term; this must be an IRI. object is an object term; this must be an IRI or literal.","title":"Parameters"},{"location":"triply-etl/assert/ratt/statements/#example-1","text":"The following triple asserts that someone is a person: triple(iri(prefix.person, 'id'), a, foaf.Person), The subject term is an IRI that is constructed from an IRI prefix ( prefix.person ) and a key that contains the IRI local name ( 'id' ). The predicate and object terms are IRIs that are imported from external vocabularies .","title":"Example: 1"},{"location":"triply-etl/assert/ratt/statements/#example-2","text":"The following triple asserts that someone has an age that is derived from the 'age' key in the record: triple('_person', foaf.age, literal('age', xsd.nonNegativeInteger)), The subject term is an IRI that is stored in the '_person' key of the record. This term was created previously, for example by using the addIri() function. The predicate term is imported from an external vocabulary . The object term is a typed literal that is constructed from a key ( 'age' ) that contains the lexical form, and a datatype IRI that is imported from an external vocabulary.","title":"Example: 2"},{"location":"triply-etl/assert/ratt/statements/#example-3","text":"The following triple uses three static IRIs: triple(Iri('https://example.com/id/123'), a, sdo.Product),","title":"Example: 3"},{"location":"triply-etl/assert/ratt/statements/#triples","text":"Asserts multiple linked data statements that consist of three terms: subject, predicate, and object (in that order).","title":"triples()"},{"location":"triply-etl/assert/ratt/statements/#signature_6","text":"This function has the following signature: triples(graph, ...triples)","title":"Signature"},{"location":"triply-etl/assert/ratt/statements/#parameters_6","text":"graph is a graph term; this must be an IRI. triples is one or more triples, represented by arrays that contain three terms: subject, predicate, and object (in that order).","title":"Parameters"},{"location":"triply-etl/assert/ratt/statements/#example","text":"Suppose that we want to distinguish between data and metadata assertions. We can do so by asserting them in distinct graphs. The following makes multiple metadata assertions in the metadata graph, followed by multiple data assertions in the data graph. triples(graph.metadata, [iri(prefix.dataset, str('flowers')), a, dcat.Dataset], ... ), triples(graph.data, [iri(prefix.flower, '_id'), a, def.Flower], ... ),","title":"Example"},{"location":"triply-etl/assert/ratt/statements/#maintenance-impact_3","text":"It is common for multiple statements to occur in the same graph. In such cases, it is possible to use the quad() function multiple times, but this requires repeating the graph term. This is why the use of triples() results in configuration that is shorter and easier to maintain.","title":"Maintenance impact"},{"location":"triply-etl/assert/ratt/statements/#relation-to-standards_3","text":"The functionality of triples() is conceptually similar to graph notation in the linked data standard TriG.
Notice that the following notation in TriG: graph:metadata { dataset:flowers a dcat:Dataset. ... } graph:data { flower:123 a def:Flower. ... } is structurally similar to the following code snippet that uses triples() : triples(graph.metadata, [iri(prefix.dataset, str('flowers')), a, dcat.Dataset], ... ), triples(graph.data, [iri(prefix.flower, '_id'), a, def.Flower], ... ),","title":"Relation to standards"},{"location":"triply-etl/assert/ratt/statements/#implicit-casts","text":"The statement assertion functions use implicit casting from strings to IRIs or literals. The rules for this are as follows: If a string value that encodes a valid IRI is specified in the subject, predicate, or graph position, that string is implicitly cast to an IRI. If a string value is specified in the object position, that string is implicitly cast to a literal. The following code snippet uses implicit casts for all fours terms in the quad() assertion: fromJson([{ url: 'https://example.com/123' }]), quad('url', 'url', 'url', 'url'), This results in the following linked data: { 'https://example.com/123'. } Notice that the code snippet can be rewritten to make use of explicit casts: fromJson([{ url: 'https://example.com/123' }]), quad(iri('url'), iri('url'), literal('url'), iri('url')),","title":"Implicit casts"},{"location":"triply-etl/assert/ratt/statements/#relation-to-standards_4","text":"The functionality of implicit casts for literals is conceptually similar to shorthand notation for xsd:string literals in the linked data standards Turtle, TriG, and SPARQL. Notice that the following notation in Turtle: city:amsterdam dct:identifier '0200'. is structurally similar to the following code snippet that uses an implicit cast for the string literal: triple('_city', dct.identifier, 'id'),","title":"Relation to standards"},{"location":"triply-etl/assert/ratt/terms/","text":"On this page: RATT Term Assertion Iri() constructor Signature Parameters Example: IRI declaration Example: in-line IRI Example: IRI concatenation iri() function Signature Parameters Example: explicit cast to IRI Example: dynamic IRI Example: static IRI See also iris() Signature Parameters Example list() Signature Parameters Example: fruit basket Example: children Maintenance impact Relation to standards literal() Signature Parameters Example: language-tagged string Example: typed literal Example: string literal See also literals() Signature Parameters Example: fruit basket Example: string literals str() Signature Parameters Example Relation to standards RATT Term Assertion \u00b6 This page documents RATT functions that are used to create RDF terms. These RDF terms are used in statement assertions . The term assertion functions are imported in the following way: import { iri, iris, literal, literals, str } from '@triplyetl/etl/ratt' Iri() constructor \u00b6 Creates static IRIs. Signature \u00b6 The signature of this constructor is as follows: Iri(string): Iri Parameters \u00b6 string is a string that encodes an absolute IRI. 
Once an IRI object is constructed, the concat() member function can be used to create new IRIs according to the following signature: Iri.concat(string): Iri Example: IRI declaration \u00b6 The following code snippet creates a static IRI by using the Iri() constructor: const subject = Iri('https://example.com/123') etl.use( triple(subject, a, sdo.Product), ) Example: in-line IRI \u00b6 It is also possible to add the static IRI in-line, without declaring a constant: etl.use( triple(Iri('https://example.com/123'), a, sdo.Product), ) Example: IRI concatenation \u00b6 The following code snippet uses a static IRI that is created by applying the concat() member function const prefix = Iri('https://example.com/') etl.use( triple(prefix.concat('123'), a, sdo.Product), ) iri() function \u00b6 Creates a static or dynamic IRI that can be used in statement assertions. Notice that this function is more powerful than the Iri() constructor , which can only create static Signature \u00b6 This function has the following signature: iri(fullIri) // 1 iri(prefix, localName) // 2 Signature 1 is used to explicitly cast a strings that encodes an absolute IRI to an IRI. Signature 2 is used to create IRIs based on an IRI prefix and multiple local names. Parameters \u00b6 fullIri is either a key that contains a dynamic string that encodes an absolute IRI, or a static string that encodes an absolute IRI. prefix is an IRI prefix that is declared with the Iri() constructor . localName is ither a key that contains a dynamic string, or a static string. This string is used as the local name of the IRI. The local name is suffixed to the given IRI prefix. Example: explicit cast to IRI \u00b6 The following code snippets casts strings that encode IRIs in the source data to subject and object IRIs that are used in triple assertions: fromJson([ { url: 'https://example.com/id/person/Jane' }, { url: 'https://example.com/id/person/John' }, ]), triple(iri('url'), owl:sameAs, iri('url')), This results in the following linked data: owl:sameAs . owl:sameAs . Notice that the use of iri() it is not required in the subject position, but is required in the object position. The following code snippet results in the same linked data, but uses an implicit cast for the subject term: fromJson([ { url: 'https://example.com/id/person/Jane' }, { url: 'https://example.com/id/person/John' }, ]), triple('url', owl:sameAs, iri('url')), See the section on automatic casts for more information. Example: dynamic IRI \u00b6 The following code snippet asserts an IRI based on a declared prefix ( prefix.ex ) and the string stored in key ( 'firstName' ): fromJson([{ firstName: 'Jane' }, { firstName: 'John' }]), triple(iri(prefix.person, 'fistName'), a, sdo.Person), This creates a dynamic IRI . This means that the asserted IRI depends on the content of the 'firstName' key in each record. For the first record, IRI person:Jane is created. For the second record, IRI person:John is created. Example: static IRI \u00b6 The following asserts an IRI based on a declared prefix ( prefix.ex ) and a static string (see function str() ): triple(iri(prefix.person, str('John')), a, sdo.Person), This creates a static IRI . This means that the same IRI is used for each record (always person:John ). 
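The difference between the dynamic and the static variant can be seen side by side in the following sketch (fromJson and the prefix.person declaration are assumptions carried over from the earlier examples):

```ts
fromJson([{ firstName: 'Jane' }, { firstName: 'John' }]),
// Dynamic IRI: one IRI per record, e.g. person:Jane and person:John.
triple(iri(prefix.person, 'firstName'), a, sdo.Person),
// Static IRI: the same IRI for every record, always person:John.
triple(iri(prefix.person, str('John')), a, sdo.Person),
```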
Notice that the same triple assertion can be made by using the Iri() instead of the iri() function: triple(prefix.person.concat('John'), a, sdo.Person), See also \u00b6 If the same IRI is used in multiple statements, repeating the same assertion multiple times may impose a maintenance burden. In such cases, it is possible to first add the IRI to the record by using the addIri() function, and refer to that one IRI in multiple statement assertions. Use function iris() to create multiple IRIs at once. iris() \u00b6 Creates multiple dynamic or static IRIs, one for each entry in an array of strings. Signature \u00b6 This function has the following signature: iris(fullIris) // 1 iris(prefix, localNames) // 2 Signature 1 is used to explicitly cast strings that encode IRIs to IRIs. Signature 2 is used to create an IRI based on an IRI prefix and a local name. Parameters \u00b6 fullIri is either a key that contains a dynamic string that encodes an absolute IRI, or a static string that encodes an absolute IRI. prefix is an IRI prefix that is declared with the Iri() constructor . localNames is either a key that contains an array of strings, or an array of keys that store dynamic strings and static strings. These string are used as the local names of the IRIs that are created. These local names are suffixed to the given IRI prefix. Example \u00b6 The following code snippet asserts one IRI for each entry in record key 'children' : fromJson([{ parent: 'John', children: ['Joe', 'Jane'] }]), triple(iri(prefix.person, 'parent'), sdo.children, iris(prefix.person, 'children')), This makes the following linked data assertions: person:John sdo:children person:Joe, person:Jane. Or diagrammatically: graph LR john -- sdo:children --> joe john -- sdo:children --> jane john[person:John]:::data joe[person:Joe]:::data jane[person:Jane]:::data classDef data fill:yellow list() \u00b6 Creates an RDF collection or singly-linked list (class rdf:List ). See the Triply Data Story about collections for more information. Signature \u00b6 This function has the following signature: list(prefix, terms) Parameters \u00b6 prefix is an IRI prefix that is declared with the Iri() constructor . terms is an array of dynamic and/or static terms. Example: fruit basket \u00b6 The following code snippet creates linked lists (linked by rdf:rest ), where each value stored in the 'contents' key is rdf:first object: fromJson([{ id: 123, contents: ['apple', 'pear', 'banana'] }]), triple(iri(prefix.basket, 'id'), def.contains, list(prefix.basket, literals('contents', lang.en))), This results in the following linked data: basket:123 def:contains ( 'apple'@en 'pear'@en 'banana'@en ). When we do not make use of the collection notation ( ... ) , the asserted linked data looks as follows: basket:123 def:contains _:list1. _:list1 rdf:first 'apple'@en; rdf:rest _:list2. _:list2 rdf:first 'pear'@en; rdf:rest _:list3. _:list3 rdf:first 'banana'@en; rdf:rest rdf:nil. 
Or diagrammatically: graph LR basket -- def:contains --> list1 list1 -- rdf:first --> apple list1 -- rdf:rest --> list2 list2 -- rdf:first --> pear list2 -- rdf:rest --> list3 list3 -- rdf:first --> banana list3 -- rdf:rest --> rdf:nil apple[\"'apple'@en\"]:::data list1[_:list1]:::data list2[_:list2]:::data list3[_:list3]:::data banana[\"'banana'@en\"]:::data basket[basket:123]:::data pear[\"'pear'@en\"]:::data classDef data fill:yellow Example: children \u00b6 The following code snippet creates linked lists for the children of every parent: fromJson([{ parent: 'John', children: ['Joe', 'Jane'] }]), triple(iri(prefix.person, 'parent'), sdo.children, list(prefix.skolem, iris(prefix.person, 'children'))), This results in the following linked data: person:John sdo:children _:list1. _:list1 rdf:first person:Joe; rdf:rest _:list2. _:list2 rdf:first person:Jane; rdf:rest rdf:nil. Or diagrammatically: graph LR john -- sdo:children --> list1 list1 -- rdf:first --> joe list1 -- rdf:rest --> list2 list2 -- rdf:first --> jane list2 -- rdf:rest --> rdf:nil list1[_:list1]:::data list2[_:list2]:::data john[person:John]:::data joe[person:Joe]:::data jane[person:Jane]:::data classDef data fill:yellow The above diagram can be translated into the statement: \"John has two children, where Joe is his first child and Jane is his second child\". Maintenance impact \u00b6 Since RDF collections (or single-linked lists) require a large number of triple assertions to establish the structure of the singly-linked list, creating such collections by hand imposes a maintenance risk. It is very easy to forget one link, or to break an existing link later during maintenance. For this reason, it is always better to use the list() function in order to assert collections. Relation to standards \u00b6 The functionality of list() is similar to the collections notation in the linked data standards TriG, Turtle, and SPARQL. Notice that the following notation in these standards: person:John sdo:children ( person:Joe person:Jane ). is structurally similar to the following code snippet that uses list() : triple(iri(prefix.person, str('John')), sdo.children, list(prefix.skolem, [str('Joe'), str('Jane')])), literal() \u00b6 Creates a literal term, based on a lexical form and a datatype IRI or language tag. Signature \u00b6 This function has the following signature: literal(lexicalForm, languageTagOrDatatype) Parameters \u00b6 lexicalForm is a static string (see function str() ), or a key that contains a dynamic string. languageTagOrDatatype is a static language tag or datatype IRI, or a key that contains a dynamic language tag or datatype IRI. Example: language-tagged string \u00b6 The following code snippet uses a language-tagged string: triple('_city', sdo.name, literal('name', lang.nl)), This results in the following linked data: city:Amsterdam sdo:name 'Amsterdam'@nl. city:Berlin sdo:name 'Berlijn'@nl. The lang object contains declarations for all language tags. See the sector on language tag declarations for more information. Example: typed literal \u00b6 The following code snippet uses a typed literal: triple('_city', vocab.population, literal('population', xsd.nonNegativeInteger)), This results in the following linked data: city:Amsterdam vocab:population '1000000'^^xsd:nonNegativeInteger. city:Berlin vocab:population '2000000'^^xsd:nonNegativeInteger. New datatype IRIs can be declared and used, and existing datatype IRIs can be reused from external vocabularies. 
The following code snippet imports four external vocabularies that contain datatype IRIs: import { dbt, geo, rdf, xsd } from '@triplyetl/etl/generic' Here is one example of a datatype IRI for each of these four external vocabularies: dbt.kilogram geo.wktLiteral rdf.HTML xsd.dateTime Example: string literal \u00b6 Statement assertions support implicit casts from strings to string literals. This means that the following code snippet: triple('_city', dct.identifier, literal('id', xsd.string)), triple('_person', sdo.name, literal(str('John Doe'), xsd.string)), can also be expressed with the following code snippet, which does not use literal() : triple('_city', dct.identifier, 'id'), triple('_person', sdo.name, str('John Doe')), Both code snippets result in the following linked data: city:amsterdam dct:identifier '0200'. person:john-doe sdo:name 'John Doe'. See also \u00b6 If the same literal is used in multiple statements, repeating the same literal assertion multiple times may impose a maintenance burden. In such cases, it is possible to first add the literal to the record with transformation addLiteral() , and refer to that one literal in multiple statements. If multiple literals with the same language tag or datatype IRI are created, repeating the same language tag or datatype IRI may impose a maintenance burden. In such cases, the literals() assertion function can be sued instead. literals() \u00b6 Creates multiple literals, one for each lexical form that appears in an array. Signature \u00b6 The signature for this function is as follows: literals(lexicalForms, languageTagOrDatatype) Parameters \u00b6 lexicalForms is an array that contains string values, or a key that stores an array that contains string values. languageTagOrDatatype is a language tag or datatype IRI. Example: fruit basket \u00b6 The following code snippet creates one literal for each value in the array that is stored in the 'contents' key: fromJson([{ id: 123, contents: ['apple', 'pear', 'banana'] }]), triple(iri(prefix.basket, 'id'), rdfs.member, literals('contents', lang.en)), This results in the following linked data: basket:123 rdfs:member 'apple'@en, 'banana'@en, 'pear'@en. Or diagrammatically: graph LR basket -- rdfs:member --> apple basket -- rdfs:member --> banana basket -- rdfs:member --> pear apple[\"'apple'@en\"]:::data banana[\"'banana'@en\"]:::data basket[basket:123]:::data pear[\"'pear'@en\"]:::data classDef data fill:yellow Example: string literals \u00b6 String literals can be asserted directly from a key that stores an array of strings. The following code snippet asserts one string literal for each child: fromJson([{ parent: 'John', children: ['Joe', 'Jane'] }]), triple(iri(prefix.person, 'parent'), sdo.children, 'children'), This results in the following linked data assertions: person:John sdo:children 'Jane', 'Joe'. Or diagrammatically: graph LR john -- sdo:children --> jane john -- sdo:children --> joe jane['Jane']:::data joe['Joe']:::data john['John']:::data classDef data fill:yellow The same thing can be achieved by specifying an explicit datatype IRI: triple(iri(prefix.person, 'parent'), sdo.children, literals('children', xsd.string)), str() \u00b6 Creates a StaticString value. RATT uses strings to denote keys in the record. The used string denotes a key that results in dynamic values : a value that is different for each record. Sometimes we want to specify a static string instead: a string that is the same for each record. 
With the str() function we indicate that a string should not be processed as a key, but should be processed as a regular (static) string value. This is usefull in case we want to provide a regular string to a middleware, and not a key that could relate to the record. Signature \u00b6 This function has the following signature: str(string) Parameters \u00b6 string is a string value. Example \u00b6 In RATT, strings often denote keys in the record. For example, the string 'abc' in the following code snippet indicates that the value of key 'abc' should be used as the local name of the IRI in the subject position, and should be used as the lexical form of the literal in the object position: triple(iri(prefix.id, 'abc'), rdfs.label, 'abc'), If we want to assert the regular (static) string 'abc' , we must use the str() function. The following code snippet asserts the IRI id:abc and the literal 'abc' : triple(iri(prefix.id, str('abc')), rdfs.label, str('abc')), To illustrate the difference between a dynamic string 'key' and the static string str('key') in a more complex usecase, imagine we use the following ifElse condition in our ETL: etl.use( fromJson([ { parent: 'John', child: 'Jane', notchild: 'Joe' }, { parent: 'Lisa', child: 'James', notchild: 'Mike' } ]), ifElse({ if: ctx => ctx.getString('parent') === 'John', then: triple(iri(prefix.id, 'parent'), sdo.children, iri(prefix.id, 'child')) }), logQuads() ) In the context of the first record { parent: 'John', child: 'Jane', notchild: 'Joe' } , we grab the string in the key parent for string comparison to the string 'John' . This value is dynamic and will be 'John' for the first record (returning true for the string comparison) and Lisa for the second record (returning false for the string comparison). This results in the created triple: sdo:children If we would use str() in the ifElse for the string comparison, it would statically compare the two strings. This means that in the following example we compare string 'parent' === 'John' , which will return false for each record. ifElse({ if: ctx => ctx.getString(str('parent')) === 'John', then: triple(iri(prefix.id, 'parent'), sdo.children, iri(prefix.id, 'child')) However, if we would change the static string from str('parent') to str('John') , the string comparison will always return true for each record: etl.use( fromJson([ { parent: 'John', child: 'Jane', notchild: 'Joe' }, { parent: 'Lisa', child: 'James', notchild: 'Mike' } ]), ifElse({ if: ctx => ctx.getString(str('John')) === 'John', then: triple(iri(prefix.id, 'parent'), sdo.children, iri(prefix.id, 'child')) }), logQuads() ) This results in the created triples: sdo:children sdo:children Relation to standards \u00b6 The functionality of str() is conceptually similar to the str function in SPARQL. In SPARQL, the str function is used to explicitly cast IRIs to their string value, and literals to their lexical form.","title":"RATT Terms"},{"location":"triply-etl/assert/ratt/terms/#ratt-term-assertion","text":"This page documents RATT functions that are used to create RDF terms. These RDF terms are used in statement assertions . 
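As a quick orientation, the following sketch combines the most common term assertions in one snippet; fromJson, the prefix.city declaration, sdo.City, and lang.nl are assumptions that follow the examples further down this page:

```ts
fromJson([{ id: '0200', name: 'Amsterdam' }]),
// iri() creates an IRI term from a key; literal() creates a literal term.
triple(iri(prefix.city, 'id'), a, sdo.City),
triple(iri(prefix.city, 'id'), sdo.name, literal('name', lang.nl)),
```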
The term assertion functions are imported in the following way: import { iri, iris, literal, literals, str } from '@triplyetl/etl/ratt'","title":"RATT Term Assertion"},{"location":"triply-etl/assert/ratt/terms/#iri-constructor","text":"Creates static IRIs.","title":"Iri() constructor"},{"location":"triply-etl/assert/ratt/terms/#signature","text":"The signature of this constructor is as follows: Iri(string): Iri","title":"Signature"},{"location":"triply-etl/assert/ratt/terms/#parameters","text":"string is a string that encodes an absolute IRI. Once an IRI object is constructed, the concat() member function can be used to create new IRIs according to the following signature: Iri.concat(string): Iri","title":"Parameters"},{"location":"triply-etl/assert/ratt/terms/#example-iri-declaration","text":"The following code snippet creates a static IRI by using the Iri() constructor: const subject = Iri('https://example.com/123') etl.use( triple(subject, a, sdo.Product), )","title":"Example: IRI declaration"},{"location":"triply-etl/assert/ratt/terms/#example-in-line-iri","text":"It is also possible to add the static IRI in-line, without declaring a constant: etl.use( triple(Iri('https://example.com/123'), a, sdo.Product), )","title":"Example: in-line IRI"},{"location":"triply-etl/assert/ratt/terms/#example-iri-concatenation","text":"The following code snippet uses a static IRI that is created by applying the concat() member function const prefix = Iri('https://example.com/') etl.use( triple(prefix.concat('123'), a, sdo.Product), )","title":"Example: IRI concatenation"},{"location":"triply-etl/assert/ratt/terms/#iri-function","text":"Creates a static or dynamic IRI that can be used in statement assertions. Notice that this function is more powerful than the Iri() constructor , which can only create static","title":"iri() function"},{"location":"triply-etl/assert/ratt/terms/#signature_1","text":"This function has the following signature: iri(fullIri) // 1 iri(prefix, localName) // 2 Signature 1 is used to explicitly cast a strings that encodes an absolute IRI to an IRI. Signature 2 is used to create IRIs based on an IRI prefix and multiple local names.","title":"Signature"},{"location":"triply-etl/assert/ratt/terms/#parameters_1","text":"fullIri is either a key that contains a dynamic string that encodes an absolute IRI, or a static string that encodes an absolute IRI. prefix is an IRI prefix that is declared with the Iri() constructor . localName is ither a key that contains a dynamic string, or a static string. This string is used as the local name of the IRI. The local name is suffixed to the given IRI prefix.","title":"Parameters"},{"location":"triply-etl/assert/ratt/terms/#example-explicit-cast-to-iri","text":"The following code snippets casts strings that encode IRIs in the source data to subject and object IRIs that are used in triple assertions: fromJson([ { url: 'https://example.com/id/person/Jane' }, { url: 'https://example.com/id/person/John' }, ]), triple(iri('url'), owl:sameAs, iri('url')), This results in the following linked data: owl:sameAs . owl:sameAs . Notice that the use of iri() it is not required in the subject position, but is required in the object position. 
The following code snippet results in the same linked data, but uses an implicit cast for the subject term: fromJson([ { url: 'https://example.com/id/person/Jane' }, { url: 'https://example.com/id/person/John' }, ]), triple('url', owl:sameAs, iri('url')), See the section on automatic casts for more information.","title":"Example: explicit cast to IRI"},{"location":"triply-etl/assert/ratt/terms/#example-dynamic-iri","text":"The following code snippet asserts an IRI based on a declared prefix ( prefix.ex ) and the string stored in key ( 'firstName' ): fromJson([{ firstName: 'Jane' }, { firstName: 'John' }]), triple(iri(prefix.person, 'fistName'), a, sdo.Person), This creates a dynamic IRI . This means that the asserted IRI depends on the content of the 'firstName' key in each record. For the first record, IRI person:Jane is created. For the second record, IRI person:John is created.","title":"Example: dynamic IRI"},{"location":"triply-etl/assert/ratt/terms/#example-static-iri","text":"The following asserts an IRI based on a declared prefix ( prefix.ex ) and a static string (see function str() ): triple(iri(prefix.person, str('John')), a, sdo.Person), This creates a static IRI . This means that the same IRI is used for each record (always person:John ). Notice that the same triple assertion can be made by using the Iri() instead of the iri() function: triple(prefix.person.concat('John'), a, sdo.Person),","title":"Example: static IRI"},{"location":"triply-etl/assert/ratt/terms/#see-also","text":"If the same IRI is used in multiple statements, repeating the same assertion multiple times may impose a maintenance burden. In such cases, it is possible to first add the IRI to the record by using the addIri() function, and refer to that one IRI in multiple statement assertions. Use function iris() to create multiple IRIs at once.","title":"See also"},{"location":"triply-etl/assert/ratt/terms/#iris","text":"Creates multiple dynamic or static IRIs, one for each entry in an array of strings.","title":"iris()"},{"location":"triply-etl/assert/ratt/terms/#signature_2","text":"This function has the following signature: iris(fullIris) // 1 iris(prefix, localNames) // 2 Signature 1 is used to explicitly cast strings that encode IRIs to IRIs. Signature 2 is used to create an IRI based on an IRI prefix and a local name.","title":"Signature"},{"location":"triply-etl/assert/ratt/terms/#parameters_2","text":"fullIri is either a key that contains a dynamic string that encodes an absolute IRI, or a static string that encodes an absolute IRI. prefix is an IRI prefix that is declared with the Iri() constructor . localNames is either a key that contains an array of strings, or an array of keys that store dynamic strings and static strings. These string are used as the local names of the IRIs that are created. These local names are suffixed to the given IRI prefix.","title":"Parameters"},{"location":"triply-etl/assert/ratt/terms/#example","text":"The following code snippet asserts one IRI for each entry in record key 'children' : fromJson([{ parent: 'John', children: ['Joe', 'Jane'] }]), triple(iri(prefix.person, 'parent'), sdo.children, iris(prefix.person, 'children')), This makes the following linked data assertions: person:John sdo:children person:Joe, person:Jane. 
Or diagrammatically: graph LR john -- sdo:children --> joe john -- sdo:children --> jane john[person:John]:::data joe[person:Joe]:::data jane[person:Jane]:::data classDef data fill:yellow","title":"Example"},{"location":"triply-etl/assert/ratt/terms/#list","text":"Creates an RDF collection or singly-linked list (class rdf:List ). See the Triply Data Story about collections for more information.","title":"list()"},{"location":"triply-etl/assert/ratt/terms/#signature_3","text":"This function has the following signature: list(prefix, terms)","title":"Signature"},{"location":"triply-etl/assert/ratt/terms/#parameters_3","text":"prefix is an IRI prefix that is declared with the Iri() constructor . terms is an array of dynamic and/or static terms.","title":"Parameters"},{"location":"triply-etl/assert/ratt/terms/#example-fruit-basket","text":"The following code snippet creates linked lists (linked by rdf:rest ), where each value stored in the 'contents' key is rdf:first object: fromJson([{ id: 123, contents: ['apple', 'pear', 'banana'] }]), triple(iri(prefix.basket, 'id'), def.contains, list(prefix.basket, literals('contents', lang.en))), This results in the following linked data: basket:123 def:contains ( 'apple'@en 'pear'@en 'banana'@en ). When we do not make use of the collection notation ( ... ) , the asserted linked data looks as follows: basket:123 def:contains _:list1. _:list1 rdf:first 'apple'@en; rdf:rest _:list2. _:list2 rdf:first 'pear'@en; rdf:rest _:list3. _:list3 rdf:first 'banana'@en; rdf:rest rdf:nil. Or diagrammatically: graph LR basket -- def:contains --> list1 list1 -- rdf:first --> apple list1 -- rdf:rest --> list2 list2 -- rdf:first --> pear list2 -- rdf:rest --> list3 list3 -- rdf:first --> banana list3 -- rdf:rest --> rdf:nil apple[\"'apple'@en\"]:::data list1[_:list1]:::data list2[_:list2]:::data list3[_:list3]:::data banana[\"'banana'@en\"]:::data basket[basket:123]:::data pear[\"'pear'@en\"]:::data classDef data fill:yellow","title":"Example: fruit basket"},{"location":"triply-etl/assert/ratt/terms/#example-children","text":"The following code snippet creates linked lists for the children of every parent: fromJson([{ parent: 'John', children: ['Joe', 'Jane'] }]), triple(iri(prefix.person, 'parent'), sdo.children, list(prefix.skolem, iris(prefix.person, 'children'))), This results in the following linked data: person:John sdo:children _:list1. _:list1 rdf:first person:Joe; rdf:rest _:list2. _:list2 rdf:first person:Jane; rdf:rest rdf:nil. Or diagrammatically: graph LR john -- sdo:children --> list1 list1 -- rdf:first --> joe list1 -- rdf:rest --> list2 list2 -- rdf:first --> jane list2 -- rdf:rest --> rdf:nil list1[_:list1]:::data list2[_:list2]:::data john[person:John]:::data joe[person:Joe]:::data jane[person:Jane]:::data classDef data fill:yellow The above diagram can be translated into the statement: \"John has two children, where Joe is his first child and Jane is his second child\".","title":"Example: children"},{"location":"triply-etl/assert/ratt/terms/#maintenance-impact","text":"Since RDF collections (or single-linked lists) require a large number of triple assertions to establish the structure of the singly-linked list, creating such collections by hand imposes a maintenance risk. It is very easy to forget one link, or to break an existing link later during maintenance. 
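For comparison, the following sketch shows what asserting even a two-element collection by hand could look like; prefix.list is a hypothetical prefix, and it is assumed that the rdf vocabulary object exposes first, rest, and nil:

```ts
// Hand-built two-element rdf:List (sketch): every cell needs its own
// rdf:first and rdf:rest assertion, which is easy to get wrong.
triple(iri(prefix.person, str('John')), sdo.children, iri(prefix.list, str('cell1'))),
pairs(iri(prefix.list, str('cell1')),
  [rdf.first, iri(prefix.person, str('Joe'))],
  [rdf.rest, iri(prefix.list, str('cell2'))],
),
pairs(iri(prefix.list, str('cell2')),
  [rdf.first, iri(prefix.person, str('Jane'))],
  [rdf.rest, rdf.nil],
),
```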
For this reason, it is always better to use the list() function in order to assert collections.","title":"Maintenance impact"},{"location":"triply-etl/assert/ratt/terms/#relation-to-standards","text":"The functionality of list() is similar to the collections notation in the linked data standards TriG, Turtle, and SPARQL. Notice that the following notation in these standards: person:John sdo:children ( person:Joe person:Jane ). is structurally similar to the following code snippet that uses list() : triple(iri(prefix.person, str('John')), sdo.children, list(prefix.skolem, [str('Joe'), str('Jane')])),","title":"Relation to standards"},{"location":"triply-etl/assert/ratt/terms/#literal","text":"Creates a literal term, based on a lexical form and a datatype IRI or language tag.","title":"literal()"},{"location":"triply-etl/assert/ratt/terms/#signature_4","text":"This function has the following signature: literal(lexicalForm, languageTagOrDatatype)","title":"Signature"},{"location":"triply-etl/assert/ratt/terms/#parameters_4","text":"lexicalForm is a static string (see function str() ), or a key that contains a dynamic string. languageTagOrDatatype is a static language tag or datatype IRI, or a key that contains a dynamic language tag or datatype IRI.","title":"Parameters"},{"location":"triply-etl/assert/ratt/terms/#example-language-tagged-string","text":"The following code snippet uses a language-tagged string: triple('_city', sdo.name, literal('name', lang.nl)), This results in the following linked data: city:Amsterdam sdo:name 'Amsterdam'@nl. city:Berlin sdo:name 'Berlijn'@nl. The lang object contains declarations for all language tags. See the sector on language tag declarations for more information.","title":"Example: language-tagged string"},{"location":"triply-etl/assert/ratt/terms/#example-typed-literal","text":"The following code snippet uses a typed literal: triple('_city', vocab.population, literal('population', xsd.nonNegativeInteger)), This results in the following linked data: city:Amsterdam vocab:population '1000000'^^xsd:nonNegativeInteger. city:Berlin vocab:population '2000000'^^xsd:nonNegativeInteger. New datatype IRIs can be declared and used, and existing datatype IRIs can be reused from external vocabularies. The following code snippet imports four external vocabularies that contain datatype IRIs: import { dbt, geo, rdf, xsd } from '@triplyetl/etl/generic' Here is one example of a datatype IRI for each of these four external vocabularies: dbt.kilogram geo.wktLiteral rdf.HTML xsd.dateTime","title":"Example: typed literal"},{"location":"triply-etl/assert/ratt/terms/#example-string-literal","text":"Statement assertions support implicit casts from strings to string literals. This means that the following code snippet: triple('_city', dct.identifier, literal('id', xsd.string)), triple('_person', sdo.name, literal(str('John Doe'), xsd.string)), can also be expressed with the following code snippet, which does not use literal() : triple('_city', dct.identifier, 'id'), triple('_person', sdo.name, str('John Doe')), Both code snippets result in the following linked data: city:amsterdam dct:identifier '0200'. person:john-doe sdo:name 'John Doe'.","title":"Example: string literal"},{"location":"triply-etl/assert/ratt/terms/#see-also_1","text":"If the same literal is used in multiple statements, repeating the same literal assertion multiple times may impose a maintenance burden. 
In such cases, it is possible to first add the literal to the record with transformation addLiteral() , and refer to that one literal in multiple statements. If multiple literals with the same language tag or datatype IRI are created, repeating the same language tag or datatype IRI may impose a maintenance burden. In such cases, the literals() assertion function can be sued instead.","title":"See also"},{"location":"triply-etl/assert/ratt/terms/#literals","text":"Creates multiple literals, one for each lexical form that appears in an array.","title":"literals()"},{"location":"triply-etl/assert/ratt/terms/#signature_5","text":"The signature for this function is as follows: literals(lexicalForms, languageTagOrDatatype)","title":"Signature"},{"location":"triply-etl/assert/ratt/terms/#parameters_5","text":"lexicalForms is an array that contains string values, or a key that stores an array that contains string values. languageTagOrDatatype is a language tag or datatype IRI.","title":"Parameters"},{"location":"triply-etl/assert/ratt/terms/#example-fruit-basket_1","text":"The following code snippet creates one literal for each value in the array that is stored in the 'contents' key: fromJson([{ id: 123, contents: ['apple', 'pear', 'banana'] }]), triple(iri(prefix.basket, 'id'), rdfs.member, literals('contents', lang.en)), This results in the following linked data: basket:123 rdfs:member 'apple'@en, 'banana'@en, 'pear'@en. Or diagrammatically: graph LR basket -- rdfs:member --> apple basket -- rdfs:member --> banana basket -- rdfs:member --> pear apple[\"'apple'@en\"]:::data banana[\"'banana'@en\"]:::data basket[basket:123]:::data pear[\"'pear'@en\"]:::data classDef data fill:yellow","title":"Example: fruit basket"},{"location":"triply-etl/assert/ratt/terms/#example-string-literals","text":"String literals can be asserted directly from a key that stores an array of strings. The following code snippet asserts one string literal for each child: fromJson([{ parent: 'John', children: ['Joe', 'Jane'] }]), triple(iri(prefix.person, 'parent'), sdo.children, 'children'), This results in the following linked data assertions: person:John sdo:children 'Jane', 'Joe'. Or diagrammatically: graph LR john -- sdo:children --> jane john -- sdo:children --> joe jane['Jane']:::data joe['Joe']:::data john['John']:::data classDef data fill:yellow The same thing can be achieved by specifying an explicit datatype IRI: triple(iri(prefix.person, 'parent'), sdo.children, literals('children', xsd.string)),","title":"Example: string literals"},{"location":"triply-etl/assert/ratt/terms/#str","text":"Creates a StaticString value. RATT uses strings to denote keys in the record. The used string denotes a key that results in dynamic values : a value that is different for each record. Sometimes we want to specify a static string instead: a string that is the same for each record. With the str() function we indicate that a string should not be processed as a key, but should be processed as a regular (static) string value. This is usefull in case we want to provide a regular string to a middleware, and not a key that could relate to the record.","title":"str()"},{"location":"triply-etl/assert/ratt/terms/#signature_6","text":"This function has the following signature: str(string)","title":"Signature"},{"location":"triply-etl/assert/ratt/terms/#parameters_6","text":"string is a string value.","title":"Parameters"},{"location":"triply-etl/assert/ratt/terms/#example_1","text":"In RATT, strings often denote keys in the record. 
For example, the string 'abc' in the following code snippet indicates that the value of key 'abc' should be used as the local name of the IRI in the subject position, and should be used as the lexical form of the literal in the object position: triple(iri(prefix.id, 'abc'), rdfs.label, 'abc'), If we want to assert the regular (static) string 'abc' , we must use the str() function. The following code snippet asserts the IRI id:abc and the literal 'abc' : triple(iri(prefix.id, str('abc')), rdfs.label, str('abc')), To illustrate the difference between a dynamic string 'key' and the static string str('key') in a more complex usecase, imagine we use the following ifElse condition in our ETL: etl.use( fromJson([ { parent: 'John', child: 'Jane', notchild: 'Joe' }, { parent: 'Lisa', child: 'James', notchild: 'Mike' } ]), ifElse({ if: ctx => ctx.getString('parent') === 'John', then: triple(iri(prefix.id, 'parent'), sdo.children, iri(prefix.id, 'child')) }), logQuads() ) In the context of the first record { parent: 'John', child: 'Jane', notchild: 'Joe' } , we grab the string in the key parent for string comparison to the string 'John' . This value is dynamic and will be 'John' for the first record (returning true for the string comparison) and Lisa for the second record (returning false for the string comparison). This results in the created triple: sdo:children If we would use str() in the ifElse for the string comparison, it would statically compare the two strings. This means that in the following example we compare string 'parent' === 'John' , which will return false for each record. ifElse({ if: ctx => ctx.getString(str('parent')) === 'John', then: triple(iri(prefix.id, 'parent'), sdo.children, iri(prefix.id, 'child')) However, if we would change the static string from str('parent') to str('John') , the string comparison will always return true for each record: etl.use( fromJson([ { parent: 'John', child: 'Jane', notchild: 'Joe' }, { parent: 'Lisa', child: 'James', notchild: 'Mike' } ]), ifElse({ if: ctx => ctx.getString(str('John')) === 'John', then: triple(iri(prefix.id, 'parent'), sdo.children, iri(prefix.id, 'child')) }), logQuads() ) This results in the created triples: sdo:children sdo:children ","title":"Example"},{"location":"triply-etl/assert/ratt/terms/#relation-to-standards_1","text":"The functionality of str() is conceptually similar to the str function in SPARQL. In SPARQL, the str function is used to explicitly cast IRIs to their string value, and literals to their lexical form.","title":"Relation to standards"},{"location":"triply-etl/enrich/","text":"On this page: Enrich See also Enrich \u00b6 The Enrich step uses linked data that is asserted in the internal store to derive new linked data. graph LR sources -- 1. Extract --> record record -- 2. Transform --> record record -- 3. Assert --> ld ld -- 4. Enrich --> ld ld -- 5. Validate --> ld ld -- 6. Publish --> destinations linkStyle 3 stroke:red,stroke-width:3px; destinations[(\"D. Destinations\\n(TriplyDB)\")] ld[C. Internal Store] record[B. Record] sources[A. Data Sources] TriplyETL supports the following languages for making enrichments: SHACL Rules allows the implementation of business rules that can add linked data to the internal store. TriplyETL supports the following forms of SHACL Rules: Triple Rules SPARQL Rules SPARQL is a query language that can also be used to add and remove data. This allows SPARQL to be used as an enrichment language. 
TriplyETL supports the following forms of SPARQL enrichment: SPARQL Construct allows linked data to be added to the internal store. SPARQL Update allows linked data to be added to and deleted from the internal store. See also \u00b6 If you have not loaded any linked data in your Internal Store yet, use one of the following approaches to do so: loadRdf() JSON-LD Expansion The RATT statement assertion functions.","title":"Overview"},{"location":"triply-etl/enrich/#enrich","text":"The Enrich step uses linked data that is asserted in the internal store to derive new linked data. graph LR sources -- 1. Extract --> record record -- 2. Transform --> record record -- 3. Assert --> ld ld -- 4. Enrich --> ld ld -- 5. Validate --> ld ld -- 6. Publish --> destinations linkStyle 3 stroke:red,stroke-width:3px; destinations[(\"D. Destinations\\n(TriplyDB)\")] ld[C. Internal Store] record[B. Record] sources[A. Data Sources] TriplyETL supports the following languages for making enrichments: SHACL Rules allows the implementation of business rules that can add linked data to the internal store. TriplyETL supports the following forms of SHACL Rules: Triple Rules SPARQL Rules SPARQL is a query language that can also be used to add and remove data. This allows SPARQL to be used as an enrichment language. TriplyETL supports the following forms of SPARQL enrichment: SPARQL Construct allows linked data to be added to the internal store. SPARQL Update allows linked data to be added to and deleted from the internal store.","title":"Enrich"},{"location":"triply-etl/enrich/#see-also","text":"If you have not loaded any linked data in your Internal Store yet, use one of the following approaches to do so: loadRdf() JSON-LD Expansion The RATT statement assertion functions.","title":"See also"},{"location":"triply-etl/enrich/shacl/","text":"SHACL Rules \u00b6 SHACL Rules allow new data to be added to the internal store, based on data that is already present. This makes SHACL Rules a great approach for data enrichment. Since SHACL Rules can be defined as part of the data model by using standardized SHACL properties and classes, it is one of the best approaches for creating and maintaining business rules in complex domains. The order in which rules are evaluated can be specified in terms of dynamic preconditions, or in terms of a predefined order. It is possible to execute rules iteratively, generating increasingly more data upon each iteration, steadily unlocking more rules as the process unfolds. Prerequisites \u00b6 SHACL Rules can be used when the following preconditions are met: You have a data model that has one or more SHACL Rules. You have some linked data in the internal store. If your internal store is still empty, you can read the Assert documentation on how to add linked data to the internal store. The function for executing SHACL Rules is imported as follows: import { executeRules } from '@triplyetl/etl/shacl' Typical ETL structure \u00b6 When SHACL Rules are used, the typical structure of the ETL script looks as follows: Assert some linked data into the internal store. This can be done by loading RDF directly with loadRdf() , or by using an extractor with transformations and assertions . Execute the SHACL rules with executeRules() from library @triplyetl/etl/shacl . Do something with the enriched linked data, e.g. publish it to TriplyDB with toTriplyDb() . 
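The executeRules() call in step 2 can also be configured with the iteration options described in the 'Iterative rules' section below. This page does not show how those options are passed, so the following one-line sketch assumes they are provided in an options object as a second argument:

```ts
// Assumption: maxIterations and errorOnMaxIterations are passed in an options
// object next to the rule source; the option names are taken from this page.
executeRules(Source.file('static/model.trig'), {
  maxIterations: 5,             // allow up to 5 rule iterations (default 0: no iteration)
  errorOnMaxIterations: 'warn', // warn instead of throwing if the last iteration still added data
}),
```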
import { Etl, Source } from '@triplyetl/etl/generic' import { executeRules } from '@triplyetl/etl/shacl' export default async function (): Promise { const etl = new Etl() etl.use( // Step 1. Assert some linked data into the internal store. executeRules(Source.file('static/model.trig')), // Step 2 // Step 3. Do something with the linked data, e.g. publish it to TriplyDB. ) return etl } Formulating SHACL Rules \u00b6 The actual formulation of the SHACL Rules depends on the kind of SHACL Rule that is used. TriplyETL supports the following two kinds of SHACL Rules: Triple Rules SPARQL Rules SHACL Rules are typically included in the information model of the dataset. Notice that it is possible to combine different kinds of SHACL Rules in the same information model. Iterative rules \u00b6 It is possible to apply SHACL Rules multiple times. This is useful when one rule generates data that can be used by another rule, whose results can be used by another rule, etc. Since it is easy for rules to end up in an infinite loop, the user must set a maximum to the number of iterations that are allowed. This is done with the following options: maxIterations is the maximum number of iterations that are used by the SHACL Rules engine. By default, this value is set to 0, which means that no iterative behavior is used. By setting this value to 2 or higher, iterative behavior is enabled. errorOnMaxIterations is the behavior that is used when the last iteration produced new linked data assertions. This may indicate that more iterations are needed. The possible values for this option are: 'throw' which throws an exception, which terminates the ETL. 'warn' which emits a warning, after which the ETL continues. 'none' which is the default value.","title":"Overview"},{"location":"triply-etl/enrich/shacl/#shacl-rules","text":"SHACL Rules allow new data to be added to the internal store, based on data that is already present. This makes SHACL Rules a great approach for data enrichment. Since SHACL Rules can be defined as part of the data model by using standardized SHACL properties and classes, it is one of the best approaches for creating and maintaining business rules in complex domains. The order in which rules are evaluated can be specified in terms of dynamic preconditions, or in terms of a predefined order. It is possible to execute rules iteratively, generating increasingly more data upon each iteration, steadily unlocking more rules as the process unfolds.","title":"SHACL Rules"},{"location":"triply-etl/enrich/shacl/#prerequisites","text":"SHACL Rules can be used when the following preconditions are met: You have a data model that has one or more SHACL Rules. You have some linked data in the internal store. If your internal store is still empty, you can read the Assert documentation on how to add linked data to the internal store. The function for executing SHACL Rules is imported as follows: import { executeRules } from '@triplyetl/etl/shacl'","title":"Prerequisites"},{"location":"triply-etl/enrich/shacl/#typical-etl-structure","text":"When SHACL Rules are used, the typical structure of the ETL script looks as follows: Assert some linked data into the internal store. This can be done by loading RDF directly with loadRdf() , or by using an extractor with transformations and assertions . Execute the SHACL rules with executeRules() from library @triplyetl/etl/shacl . Do something with the enriched linked data, e.g. publish it to TriplyDB with toTriplyDb() . 
import { Etl, Source } from '@triplyetl/etl/generic' import { executeRules } from '@triplyetl/etl/shacl' export default async function (): Promise { const etl = new Etl() etl.use( // Step 1. Assert some linked data into the internal store. executeRules(Source.file('static/model.trig')), // Step 2 // Step 3. Do something with the linked data, e.g. publish it to TriplyDB. ) return etl }","title":"Typical ETL structure"},{"location":"triply-etl/enrich/shacl/#formulating-shacl-rules","text":"The actual formulation of the SHACL Rules depends on the kind of SHACL Rule that is used. TriplyETL supports the following two kinds of SHACL Rules: Triple Rules SPARQL Rules SHACL Rules are typically included in the information model of the dataset. Notice that it is possible to combine different kinds of SHACL Rules in the same information model.","title":"Formulating SHACL Rules"},{"location":"triply-etl/enrich/shacl/#iterative-rules","text":"It is possible to apply SHACL Rules multiple times. This is useful when one rule generates data that can be used by another rule, whose results can be used by another rule, etc. Since it is easy for rules to end up in an infinite loop, the user must set a maximum to the number of iterations that are allowed. This is done with the following options: maxIterations is the maximum number of iterations that are used by the SHACL Rules engine. By default, this value is set to 0, which means that no iterative behavior is used. By setting this value to 2 or higher, iterative behavior is enabled. errorOnMaxIterations is the behavior that is used when the last iteration produced new linked data assertions. This may indicate that more iterations are needed. The possible values for this option are: 'throw' which throws an exception, which terminates the ETL. 'warn' which emits a warning, after which the ETL continues. 'none' which is the default value.","title":"Iterative rules"},{"location":"triply-etl/enrich/shacl/sparql-rules/","text":"On this page: SPARQL Rules Example 1: Deducing fatherhood Step 1A: Implement the SPARQL Construct query Step 1B: Create the node shape Step 1C: Write and run the script Step 1D: Using files (optional) SPARQL Rules \u00b6 SPARQL Rules are a form of SHACL Rules . SPARQL Rules can be arbitrarily complex, utilizing all features available in the SPARQL query language. SPARQL Rules have the following benefits and downsides. Benefits: Simple to use if you are familiar with SPARQL. Integrated with the information model. Can be used to assert any number of triples. Allows arbitrarily complex business rules to be formulated, e.g. using aggregation, filters, external data, property paths, function calls. Can use the prefix declarations that are represented in the information model ( sh:namespace and sh:prefix ). Downsides: No reflection: the rule is encoded in a literal, so its internal structure cannot be queried as RDF. No reflection: cannot use the prefix declarations that occur in the serialization format of the information model in which it occurs (e.g. TriG, Turtle). The rest of this page describes a examples that uses SPARQL Rules. Example 1: Deducing fatherhood \u00b6 This example uses the same data and rule as the corresponding Triple Rule example . Step 1A: Implement the SPARQL Construct query \u00b6 In natural language, we can define the following rule for deducing fatherhood: Persons with at least one child and the male gender, are fathers. 
We can implement this deduction with the following SPARQL Construct query: base prefix sdo: construct { $this a . } where { $this a sdo:Person; sdo:children []; sdo:gender sdo:Male. } Notice the following details: The conditions are specified in the Where clause. The assertion is specified in the Construct template. We use the variable name $this to bind to the instances for which the rule will be executed. In the SPARQL query language, this name is only a convention, and has exactly the same behavior as using any other variable name such as ?person or ?x . We can run this query directly from TriplyETL, and this will result in the correct deductions. In fact, this is why SPARQL Construct is one of the enrichment configuration languages that are supported by TriplyETL. Step 1B: Create the node shape \u00b6 In Step 1A, we used a SPARQL Construct query to deduce new data. In this step, we will wrap that query inside a SPARQL Rule. This allows us to relate the rule to our information model. In the information model, rules are related to node shapes. When instance data conforms to the node shape, the SPARQL Rule is executed. Notice that this is different from calling SPARQL Construct queries directly, where we must determine when to run which query. SPARQL Rules are triggered by the information model instead. This has many benefits, especially for large collections of business rules, where the execution order may no longer be straightforward. In order for our SPARQL Construct query to be triggered by a node shape, we need to identify some target criterion that will allow the node shape to trigger the query. One target criterion for node shapes is sh:targetClass . We can trigger the SPARQL Construct query for every instance of the class sdo:Person . This means that we move the check of whether a resource is a person from the SPARQL Construct query into the node shape. This results in the following linked data snippet: base prefix sdo: prefix sh: a sh:NodeShape; sh:targetClass sdo:Person; sh:rule [ a sh:SPARQLRule; sh:construct ''' base prefix sdo: construct { $this a . } where { $this sdo:children []; sdo:gender sdo:Male. }''' ]. Notice the following details: We introduce a node shape that targets all instances of sdo:Person . The node shape is connected to a SPARQL Rule via the sh:rule property. The SPARQL Rule has its own RDF resource, and is connected to the query string via the sh:construct property. The SPARQL Construct query from Step 1A no longer includes the a sdo:Person line. This line is no longer needed, since the node shape will only trigger for instances of sdo:Person in the first place. The SPARQL Construct query uses the variable name $this to bind to the instances for which the rule will be executed. While this name is only a convention in the SPARQL query language, it has a special meaning in the SPARQL Rule. This variable will be bound for all targets of the node shape (i.e. for every person in the data). The literal that contains the SPARQL Construct query uses the triple-quoted literal notation ( '''...''' ). This notation allows us to use unescaped newlines inside the literal, which allows us to inline the query string in a readable way. Step 1C: Write and run the script \u00b6 The following script is completely self-contained. By copy/pasting it into TriplyETL, you can execute the rule over the instance data, and deduce the fact that John is a father. Notice that the script includes the following components: Load the instance data from Step 1A with loadRdf() .
Execute the rule from Step 1B with executeRules() . Print the contents of the internal store with logQuads() . import { logQuads } from '@triplyetl/etl/debug' import { Etl, Source, loadRdf } from '@triplyetl/etl/generic' import { executeRules } from '@triplyetl/etl/shacl' export default async function (): Promise { const etl = new Etl() etl.use( loadRdf(Source.string(` base prefix sdo: a sdo:Person; sdo:children ; sdo:gender sdo:Male.`)), executeRules(Source.string(` base prefix sdo: prefix sh: a sh:NodeShape; sh:targetClass sdo:Person; sh:rule [ a sh:SPARQLRule; sh:construct ''' base prefix sdo: construct { $this a . } where { $this sdo:children []; sdo:gender sdo:Male. }''' ].`)), logQuads(), ) return etl } When we run this script (command npx etl ), the following linked data is printed: a sdo:Person, ; sdo:children ; sdo:gender sdo:Male. Notice that the fatherhood assertion was correctly added to the internal store, based on the Triple Rule in the data model. Step 1D: Using files (optional) \u00b6 The script in Step 1C includes both the instance data and the information model as inline strings, using Source.string() . This is great for creating a self-contained example, but not realistic when the number of rules increases. We therefore show the same script after these inline components have been stored in separate files: The instance data is stored in file static/instances.trig . The information model is stored in file static/model.trig . Now the instance data and information model can be edited in their own files, and the script stays concise: import { logQuads } from '@triplyetl/etl/debug' import { Etl, Source, loadRdf } from '@triplyetl/etl/generic' import { executeRules } from '@triplyetl/etl/shacl' export default async function (): Promise { const etl = new Etl() etl.use( loadRdf(Source.file('static/instances.trig')), executeRules(Source.file('static/model.trig')), logQuads(), ) return etl }","title":"SPARQL Rules"},{"location":"triply-etl/enrich/shacl/sparql-rules/#sparql-rules","text":"SPARQL Rules are a form of SHACL Rules . SPARQL Rules can be arbitrarily complex, utilizing all features available in the SPARQL query language. SPARQL Rules have the following benefits and downsides. Benefits: Simple to use if you are familiar with SPARQL. Integrated with the information model. Can be used to assert any number of triples. Allows arbitrarily complex business rules to be formulated, e.g. using aggregation, filters, external data, property paths, function calls. Can use the prefix declarations that are represented in the information model ( sh:namespace and sh:prefix ). Downsides: No reflection: the rule is encoded in a literal, so its internal structure cannot be queried as RDF. No reflection: cannot use the prefix declarations that occur in the serialization format of the information model in which it occurs (e.g. TriG, Turtle). The rest of this page describes a examples that uses SPARQL Rules.","title":"SPARQL Rules"},{"location":"triply-etl/enrich/shacl/sparql-rules/#example-1-deducing-fatherhood","text":"This example uses the same data and rule as the corresponding Triple Rule example .","title":"Example 1: Deducing fatherhood"},{"location":"triply-etl/enrich/shacl/sparql-rules/#step-1a-implement-the-sparql-construct-query","text":"In natural language, we can define the following rule for deducing fatherhood: Persons with at least one child and the male gender, are fathers. 
We can implement this deduction with the following a SPARQL Construct query: base prefix sdo: construct { $this a . } where { $this a sdo:Person; sdo:children []; sdo:gender sdo:Male. } Notice the following details: The conditions are specified in the Where clause. The assertion is specified in the Construct template. We use the variable name $this to bind to the instances for which the rule will be executed. In the SPARQL query language, this name is only a convention, and has exactly the same behavior as using any other variable name such as ?person or ?x . We can run this query directly from TriplyETL, and this will result in the correct deductions. In fact, this is why SPARQL Construct is one of the enrichment configuration languages that are supported by TriplyETL.","title":"Step 1A: Implement the SPARQL Construct query"},{"location":"triply-etl/enrich/shacl/sparql-rules/#step-1b-create-the-node-shape","text":"In Step 1A, we used a SPARQL Construct query to deduce new data.In this step, we will wrap that query inside a SPARQL Rule. This allows us to relate the rule to our information model. In the information model, rules are related to node shapes. When instance data conforms to the node shape, the SPARQL Rule is executed. Notice that this is different from calling SPARQL Construct queries directly, where we must determine when to run which query. SPARQL Rules are triggered by the information model instead. This has many benefits, especially for large collections of business rules, where the execution order may no longer be straightforward. In order for our SPARQL Construct query to be triggered by a node shape, we need to identify some target criterion that will allow the node shape to trigger the query. One target criterion for node shapes is sh:targetClass . We can trigger the SPARQL Construct query for every instance of the class sdo:Person . This means that we move the check of whether a resource is a person from the SPARQL Construct query into the node shape. This results in the following linked data snippet: base prefix sdo: prefix sh: a sh:NodeShape; sh:targetClass sdo:Person; sh:rule [ a sh:SPARQLRule; sh:construct ''' base prefix sdo: construct { $this a . } where { $this sdo:children []; sdo:gender sdo:Male. }''' ]. Notice the following details: We introduce a node shape that targets all instances of sdo:Person . The node shape is connected to a SPARQL Rule via the sh:rule property. The SPARQL Rule has its own RDF resource, and is connected to the query string via the sh:construct property. The SPARQL Construct query from Step 1 no longer include the a sdo:Person line. This line is no longer needed, since the node shape will only trigger for instances of sdo:Person in the first place. The SPARQL Construct query uses variable name $this to bind to the instances for which the rule will be executed. While this name is only a convention in the SPARQL query language, it has a special meaning in the SPARQL Rule. This variable will be bound for all targets of the node shape (i.e. for every person in the data). The literal that contains the SPARQL Construct query uses triple quoted literals notation ( '''...''' ). This notation allows us to use unescaped newlines inside the literal, which allows us to inline the query string in a readable way.","title":"Step 1B: Create the node shape"},{"location":"triply-etl/enrich/shacl/sparql-rules/#step-1c-write-and-run-the-script","text":"The following script is completely self-contained. 
By copy/pasting it into TriplyETL, you can execute the rule over the instance data, and deduce the fact that John is a father. Notice that the script includes the following components: Load the instance data from Step 1A with loadRdf() . Execute the rule from Step 1B with executeRules() . Print the contents of the internal store with logQuads() . import { logQuads } from '@triplyetl/etl/debug' import { Etl, Source, loadRdf } from '@triplyetl/etl/generic' import { executeRules } from '@triplyetl/etl/shacl' export default async function (): Promise { const etl = new Etl() etl.use( loadRdf(Source.string(` base prefix sdo: a sdo:Person; sdo:children ; sdo:gender sdo:Male.`)), executeRules(Source.string(` base prefix sdo: prefix sh: a sh:NodeShape; sh:targetClass sdo:Person; sh:rule [ a sh:SPARQLRule; sh:construct ''' base prefix sdo: construct { $this a . } where { $this sdo:children []; sdo:gender sdo:Male. }''' ].`)), logQuads(), ) return etl } When we run this script (command npx etl ), the following linked data is printed: a sdo:Person, ; sdo:children ; sdo:gender sdo:Male. Notice that the fatherhood assertion was correctly added to the internal store, based on the Triple Rule in the data model.","title":"Step 1C: Write and run the script"},{"location":"triply-etl/enrich/shacl/sparql-rules/#step-1d-using-files-optional","text":"The script in Step 1C includes both the instance data and the information model as inline strings, using Source.string() . This is great for creating a self-contained example, but not realistic when the number of rules increases. We therefore show the same script after these inline components have been stored in separate files: The instance data is stored in file static/instances.trig . The information model is stored in file static/model.trig . Now the instance data and information model can be edited in their own files, and the script stays concise: import { logQuads } from '@triplyetl/etl/debug' import { Etl, Source, loadRdf } from '@triplyetl/etl/generic' import { executeRules } from '@triplyetl/etl/shacl' export default async function (): Promise { const etl = new Etl() etl.use( loadRdf(Source.file('static/instances.trig')), executeRules(Source.file('static/model.trig')), logQuads(), ) return etl }","title":"Step 1D: Using files (optional)"},{"location":"triply-etl/enrich/shacl/triple-rules/","text":"On this page: Triple Rules Example: Deducing fatherhood Step A: Load instance data Step B: Formulate the SHACL rule Step C: Write and run the script Step D: Using files (optional) See also Triple Rules \u00b6 Triple Rules are a form of SHACL Rules . Triple Rules can only assert one single triple, but they are relatively easy to learn and apply. Triple Rules have the following benefits and downsides. Benefits: Simple to use if you are familiar with RDF. Does not require knowledge of the SPARQL language. Integrated with the information model. Reflection: the rule can itself be queried as RDF. Reflection: the rule can use the prefix declarations that occur in the serialization format of the information model in which it appears (e.g. TriG, Turtle). Downsides: Can only assert one single triple. Cannot use the prefix declarations that are represented in the information model (using sh:namespace and sh:prefix ). The rest of this page describes a complete example that uses Triple Rules. Example: Deducing fatherhood \u00b6 This section describes a full example of a Triple Rule that can deduce that somebody is a father, based on other facts that are known about the person. 
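Before walking through the individual steps, the following sketch previews the overall shape of the script that this example builds up: Step A provides the instance data, Step B provides the rule, and Steps C and D combine them. The file names static/instances.trig and static/model.trig are the ones used later in Step D; any other paths would work equally well.

```ts
import { logQuads } from '@triplyetl/etl/debug'
import { Etl, Source, loadRdf } from '@triplyetl/etl/generic'
import { executeRules } from '@triplyetl/etl/shacl'

export default async function (): Promise<Etl> {
  const etl = new Etl()
  etl.use(
    // Step A: load the instance data into the internal store.
    loadRdf(Source.file('static/instances.trig')),
    // Step B: execute the Triple Rule that is part of the information model.
    executeRules(Source.file('static/model.trig')),
    // Inspect the enriched contents of the internal store.
    logQuads(),
  )
  return etl
}
```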
Step A: Load instance data \u00b6 We first need to load some instance data, so that we can apply a rule and enrich the loaded data with some new data. We start with linked data assertions that state that John is a male who has a child (Mary): base prefix sdo: a sdo:Person; sdo:children ; sdo:gender sdo:Male. Applying our knowledge of the world, we as humans can deduce that John is also a father. This deduction can also be expressed in linked data: base a . When we make this deduction, we are applying a (possibly implicit) rule. When we try to make the rule that we have applied explicit, we discover that a rule has the following two components: The condition is the criterion that must be met in order for the rule to become applicable. In our example, we must have instance data about a person. That person must have at least one child, and that person must be male. Notice that the condition can be made arbitrarily complex: we can add more criteria like age, nationality, etc. if we wanted to. The assertion is the new data that we can add to our internal store. In our example, this is the assertion that John is a father. We can show this principle in a diagram, where condition and assertion contain the two components of the rule: graph subgraph Condition id:john -- a --> sdo:Person id:john -- sdo:children --> id:mary id:john -- sdo:gender --> sdo:Male end subgraph Assertion id:john -- a --> def:Father end Step B: Formulate the SHACL rule \u00b6 In Step A, we applied a rule to the instance John. But our dataset may contain information about other people too: people with or without children, people with different genders, etc. Suppose our dataset contains information about Peter, who has two children and has the male gender. We can apply the same rule to deduce that Peter is also a father. When we apply the same rule to an arbitrary number of instances, we are applying a principle called 'generalization'. We replace information about instances like 'John' and 'Peter' with a generic class such as 'Person'. When we think about it, the generalized rule that we have applied to John and Peter, and that we can apply to any number of individuals, runs as follows: Persons with at least one child and the male gender, are fathers. We can formalize this generalized rule in the following SHACL snippet: base prefix rdf: prefix sdo: prefix sh: sh:targetClass sdo:Person; sh:rule [ a sh:TripleRule; sh:condition [ sh:property [ sh:path sdo:children; sh:minCount 1 ] ], [ sh:property [ sh:path sdo:gender; sh:hasValue sdo:Male ] ]; sh:subject sh:this; sh:predicate rdf:type; sh:object ]. Notice the following details: The rule only applies to persons, i.e. instances of the class sdo:Person . This is expressed by the sh:targetClass property. The first condition of the rule is that the person must have at least one child. This is expressed by sh:condition and sh:minCount . The second condition of the rule is that the gender of the person is male. This is expressed by sh:condition and sh:hasValue . The assertion is that the person is a father. Since we use a Triple Rule, this is expressed by the properties sh:subject , sh:predicate , and sh:object . Notice that the term sh:this is used to refer to individuals for whom all conditions are met (in our example: John). Step C: Write and run the script \u00b6 The following script is completely self-contained. By copy/pasting it into TriplyETL, you can execute the rule over the instance data, and deduce the fact that John is a father. 
Notice that the script includes the following components: Load the instance data from Step A with loadRdf() . Execute the rule from Step B with executeRules() . Print the contents of the internal store with logQuads() . import { logQuads } from '@triplyetl/etl/debug' import { Etl, Source, loadRdf } from '@triplyetl/etl/generic' import { executeRules } from '@triplyetl/etl/shacl' export default async function (): Promise { const etl = new Etl() etl.use( loadRdf(Source.string(` base prefix sdo: a sdo:Person; sdo:children ; sdo:gender sdo:Male.`)), executeRules(Source.string(` base prefix rdf: prefix sdo: prefix sh: sh:targetClass sdo:Person; sh:rule [ a sh:TripleRule; sh:condition [ sh:property [ sh:path sdo:children; sh:minCount 1 ] ], [ sh:property [ sh:path sdo:gender; sh:hasValue sdo:Male ] ]; sh:subject sh:this; sh:predicate rdf:type; sh:object ].`)), logQuads(), ) return etl } When we run this script (command npx etl ), the following linked data is printed: a sdo:Person, ; sdo:children ; sdo:gender sdo:Male. Notice that the fatherhood assertion was correctly added to the internal store, based on the Triple Rule in the data model. Step D: Using files (optional) \u00b6 The script in Step C includes both the instance data and the information model as inline strings, using Source.string() . This is great for creating a self-contained example, but not realistic when the number of rules increases. We therefore show the same script after these inline components have been stored in separate files: The instance data is stored in file static/instances.trig . The information model is stored in file static/model.trig . Now the instance data and information model can be edited in their own files, and the script stays concise: import { logQuads } from '@triplyetl/etl/debug' import { Etl, Source, loadRdf } from '@triplyetl/etl/generic' import { executeRules } from '@triplyetl/etl/shacl' export default async function (): Promise { const etl = new Etl() etl.use( loadRdf(Source.file('static/instances.trig')), executeRules(Source.file('static/model.trig')), logQuads(), ) return etl } See also \u00b6 Use SPARQL Rules for rules that are more complex and that cannot be expressed by Triple Rules. Triple Rules are a form of SHACL Rules . SHACL Rules are documented in the SHACL Advanced Features Working Group Note . SHACL Rules are a form of data enrichment. Go to the enrichment overview page for information about other enrichment approaches.","title":"Triple Rules"},{"location":"triply-etl/enrich/shacl/triple-rules/#triple-rules","text":"Triple Rules are a form of SHACL Rules . Triple Rules can only assert one single triple, but they are relatively easy to learn and apply. Triple Rules have the following benefits and downsides. Benefits: Simple to use if you are familiar with RDF. Does not require knowledge of the SPARQL language. Integrated with the information model. Reflection: the rule can itself be queried as RDF. Reflection: the rule can use the prefix declarations that occur in the serialization format of the information model in which it appears (e.g. TriG, Turtle). Downsides: Can only assert one single triple. Cannot use the prefix declarations that are represented in the information model (using sh:namespace and sh:prefix ). 
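Because Triple Rules are executed with the same executeRules() function as other SHACL Rules, the iterative behavior described in the 'Iterative rules' section above can be combined with them. The sketch below is only an illustration: it assumes that the maxIterations and errorOnMaxIterations options are passed in an options object as the second argument to executeRules(), and that the rules live in static/model.trig; the text above does not show the exact call signature.

```ts
import { Etl, Source } from '@triplyetl/etl/generic'
import { executeRules } from '@triplyetl/etl/shacl'

export default async function (): Promise<Etl> {
  const etl = new Etl()
  etl.use(
    // Assumption: the iteration options are accepted as a second argument.
    executeRules(Source.file('static/model.trig'), {
      // Run the rules at most 5 times, or until no new data is asserted.
      maxIterations: 5,
      // Only warn (instead of throwing) if the last iteration still produced new assertions.
      errorOnMaxIterations: 'warn',
    }),
  )
  return etl
}
```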
The rest of this page describes a complete example that uses Triple Rules.","title":"Triple Rules"},{"location":"triply-etl/enrich/shacl/triple-rules/#example-deducing-fatherhood","text":"This section describes a full example of a Triple Rule that can deduce that somebody is a father, based on other facts that are known about the person.","title":"Example: Deducing fatherhood"},{"location":"triply-etl/enrich/shacl/triple-rules/#step-a-load-instance-data","text":"We first need to load some instance data, so that we can apply a rule and enrich the loaded data with some new data. We start with linked data assertions that state that John is a male who has a child (Mary): base prefix sdo: a sdo:Person; sdo:children ; sdo:gender sdo:Male. Applying our knowledge of the world, we as humans can deduce that John is also a father. This deduction can also be expressed in linked data: base a . When we make this deduction, we are applying a (possibly implicit) rule. When we try to make the rule that we have applied explicit, we discover that a rule has the following two components: The condition is the criterion that must be met in order for the rule to become applicable. In our example, we must have instance data about a person. That person must have at least one child, and that person must be male. Notice that the condition can be made arbitrarily complex: we can add more criteria like age, nationality, etc. if we wanted to. The assertion is the new data that we can add to our internal store. In our example, this is the assertion that John is a father. We can show this principle in a diagram, where condition and assertion contain the two components of the rule: graph subgraph Condition id:john -- a --> sdo:Person id:john -- sdo:children --> id:mary id:john -- sdo:gender --> sdo:Male end subgraph Assertion id:john -- a --> def:Father end","title":"Step A: Load instance data"},{"location":"triply-etl/enrich/shacl/triple-rules/#step-b-formulate-the-shacl-rule","text":"In Step A, we applied a rule to the instance John. But our dataset may contain information about other people too: people with or without children, people with different genders, etc. Suppose our dataset contains information about Peter, who has two children and has the male gender. We can apply the same rule to deduce that Peter is also a father. When we apply the same rule to an arbitrary number of instances, we are applying a principle called 'generalization'. We replace information about instances like 'John' and 'Peter' with a generic class such as 'Person'. When we think about it, the generalized rule that we have applied to John and Peter, and that we can apply to any number of individuals, runs as follows: Persons with at least one child and the male gender, are fathers. We can formalize this generalized rule in the following SHACL snippet: base prefix rdf: prefix sdo: prefix sh: sh:targetClass sdo:Person; sh:rule [ a sh:TripleRule; sh:condition [ sh:property [ sh:path sdo:children; sh:minCount 1 ] ], [ sh:property [ sh:path sdo:gender; sh:hasValue sdo:Male ] ]; sh:subject sh:this; sh:predicate rdf:type; sh:object ]. Notice the following details: The rule only applies to persons, i.e. instances of the class sdo:Person . This is expressed by the sh:targetClass property. The first condition of the rule is that the person must have at least one child. This is expressed by sh:condition and sh:minCount . The second condition of the rule is that the gender of the person is male. This is expressed by sh:condition and sh:hasValue . 
The assertion is that the person is a father. Since we use a Triple Rule, this is expressed by the properties sh:subject , sh:predicate , and sh:object . Notice that the term sh:this is used to refer to individuals for whom all conditions are met (in our example: John).","title":"Step B: Formulate the SHACL rule"},{"location":"triply-etl/enrich/shacl/triple-rules/#step-c-write-and-run-the-script","text":"The following script is completely self-contained. By copy/pasting it into TriplyETL, you can execute the rule over the instance data, and deduce the fact that John is a father. Notice that the script includes the following components: Load the instance data from Step A with loadRdf() . Execute the rule from Step B with executeRules() . Print the contents of the internal store with logQuads() . import { logQuads } from '@triplyetl/etl/debug' import { Etl, Source, loadRdf } from '@triplyetl/etl/generic' import { executeRules } from '@triplyetl/etl/shacl' export default async function (): Promise { const etl = new Etl() etl.use( loadRdf(Source.string(` base prefix sdo: a sdo:Person; sdo:children ; sdo:gender sdo:Male.`)), executeRules(Source.string(` base prefix rdf: prefix sdo: prefix sh: sh:targetClass sdo:Person; sh:rule [ a sh:TripleRule; sh:condition [ sh:property [ sh:path sdo:children; sh:minCount 1 ] ], [ sh:property [ sh:path sdo:gender; sh:hasValue sdo:Male ] ]; sh:subject sh:this; sh:predicate rdf:type; sh:object ].`)), logQuads(), ) return etl } When we run this script (command npx etl ), the following linked data is printed: a sdo:Person, ; sdo:children ; sdo:gender sdo:Male. Notice that the fatherhood assertion was correctly added to the internal store, based on the Triple Rule in the data model.","title":"Step C: Write and run the script"},{"location":"triply-etl/enrich/shacl/triple-rules/#step-d-using-files-optional","text":"The script in Step C includes both the instance data and the information model as inline strings, using Source.string() . This is great for creating a self-contained example, but not realistic when the number of rules increases. We therefore show the same script after these inline components have been stored in separate files: The instance data is stored in file static/instances.trig . The information model is stored in file static/model.trig . Now the instance data and information model can be edited in their own files, and the script stays concise: import { logQuads } from '@triplyetl/etl/debug' import { Etl, Source, loadRdf } from '@triplyetl/etl/generic' import { executeRules } from '@triplyetl/etl/shacl' export default async function (): Promise { const etl = new Etl() etl.use( loadRdf(Source.file('static/instances.trig')), executeRules(Source.file('static/model.trig')), logQuads(), ) return etl }","title":"Step D: Using files (optional)"},{"location":"triply-etl/enrich/shacl/triple-rules/#see-also","text":"Use SPARQL Rules for rules that are more complex and that cannot be expressed by Triple Rules. Triple Rules are a form of SHACL Rules . SHACL Rules are documented in the SHACL Advanced Features Working Group Note . SHACL Rules are a form of data enrichment. Go to the enrichment overview page for information about other enrichment approaches.","title":"See also"},{"location":"triply-etl/enrich/sparql/construct/","text":"On this page: SPARQL Construct Signature Parameters Example Usage Relation to standards SPARQL Construct \u00b6 SPARQL Construct queries can be used to enrich the data that is in the Internal Store. 
The following full TriplyETL script loads one triple into the Internal Store, and then uses a SPARQL Construct query to add a second triple: Signature \u00b6 This function has the following signature: construct(query, opts?) Parameters \u00b6 query : the query to run. This can be a SPARQL query string, a reference to a query file, or an operation on the Context ( (ctx: Context) => string|string[] ). The aforementioned query arguments can also be provided as an array for the query parameter. opts : an optional object containing options for SPARQL Construct. toGraph : an optional argument that stores the Construct query results in the provided graph; defaults to the ETL's default graph. Example Usage \u00b6 import { logQuads } from '@triplyetl/etl/debug' import { Etl, loadRdf, Source } from '@triplyetl/etl/generic' import { construct } from '@triplyetl/etl/sparql' export default async function (): Promise<Etl> { const etl = new Etl() etl.use( loadRdf(Source.string('

.')), construct('construct { ?o ?p ?s. } where { ?s ?p ?o. }'), logQuads(), ) return etl } This results in the following linked data:

.

. Relation to standards \u00b6 This function is an implementation of SPARQL Construct; for more information on the standard, see SPARQL Construct .","title":"SPARQL Construct"},{"location":"triply-etl/enrich/sparql/construct/#sparql-construct","text":"SPARQL Construct queries can be used to enrich the data that is in the Internal Store. The following full TriplyETL script loads one triple into the Internal Store, and then uses a SPARQL Construct query to add a second triple:","title":"SPARQL Construct"},{"location":"triply-etl/enrich/sparql/construct/#signature","text":"This function has the following signature: construct(query, opts?)","title":"Signature"},{"location":"triply-etl/enrich/sparql/construct/#parameters","text":"query : the query to run. This can be a SPARQL query string, a reference to a query file, or an operation on the Context ( (ctx: Context) => string|string[] ). The aforementioned query arguments can also be provided as an array for the query parameter. opts : an optional object containing options for SPARQL Construct. toGraph : an optional argument that stores the Construct query results in the provided graph; defaults to the ETL's default graph.","title":"Parameters"},{"location":"triply-etl/enrich/sparql/construct/#example-usage","text":"import { logQuads } from '@triplyetl/etl/debug' import { Etl, loadRdf, Source } from '@triplyetl/etl/generic' import { construct } from '@triplyetl/etl/sparql' export default async function (): Promise<Etl> { const etl = new Etl() etl.use( loadRdf(Source.string('

.')), construct('construct { ?o ?p ?s. } where { ?s ?p ?o. }'), logQuads(), ) return etl } This results in the following linked data:

.

.","title":"Example Usage"},{"location":"triply-etl/enrich/sparql/construct/#relation-to-standards","text":"This function is an implementation of the SPARQL Construct, for more information on the standard see SPARQL Construct .","title":"Relation to standards"},{"location":"triply-etl/enrich/sparql/update/","text":"On this page: SPARQL Update Insert Data Using prefix declarations Delete Data Delete Insert Where SPARQL Update \u00b6 SPARQL is a powerful query language that can be used to modify and enrich linked data in the Internal Store. With SPARQL, you can generate new linked data based on existing linked data, thereby enhancing the contents of the store. The function for using SPARQL Update can be imported as follows: import { update } from '@triplyetl/etl/sparql' Insert Data \u00b6 Insert Data can be used to add linked data to the Internal Store. The following example adds one triple: import { logQuads } from '@triplyetl/etl/debug' import { Etl } from '@triplyetl/etl/generic' import { update } from '@triplyetl/etl/sparql' export default async function (): Promise { const etl = new Etl() etl.use( update(` base insert data { . }`), logQuads(), ) return etl } Debug function logQuads() prints the content of the internal store to standard output: base . Using prefix declarations \u00b6 Notice that the SPARQL Update function takes a plain string. Any typos you make in this string will only result in errors at runtime, when the query string is interpreted and executed. One of the more difficult things to get right in a SPARQL string are the prefix declarations. We can use the prefix object to insert the correct IRI prefixes. The following example asserts three triples, and uses the prefix object to insert the IRI prefix for Schema.org: import { logQuads } from '@triplyetl/etl/debug' import { Etl } from '@triplyetl/etl/generic' import { update } from '@triplyetl/etl/sparql' import { sdo } from '@triplyetl/vocabularies' export default async function (): Promise { const etl = new Etl() etl.use( update(` base prefix sdo: <${sdo.$namespace}> insert data { a sdo:Person; sdo:children ; sdo:gender sdo:Male. }`), logQuads(), ) return etl } This prints the following linked data to standard output: base prefix sdo: a sdo:Person; sdo:children ; sdo:gender sdo:Male. Delete Data \u00b6 While there are not many uses cases for removing data from the internal store, this is an operation that is supported by the SPARQL Update standard. The following function call removes the parent/child relationship assertion that was added to the internal store earlier: update(` prefix sdo: <${sdo.$namespace}> delete data { sdo:children . }`), You can use the debug function logQuads() before and after this function call, to see the effects on the internal store. Delete Insert Where \u00b6 SPARQL Update can be used to conditionally add and/or remove linked data to/from the internal store. It uses the following keywords for this: where is the condition that must be met inside the internal store. Conditions can be specified in a generic way by using SPARQL variables. The bindings for these variables are shared with the other two components. delete is the pattern that is removed from the internal store. This requires that the where condition is satisfied in the internal store. Any bindings for variables that are shared between the where condition and the delete pattern are instantiated before deletion is performed. Deletion is performed before insertion. insert is the pattern that is added to the internal store. 
This requires that the where condition is satisfied in the internal store. Any bindings for variables that are shared between the where condition and the insert pattern are instantiated before insertion is performed. Insertion is performed after deletion. We can use this powerful combination of a where condition and a delete and insert follow-up to implement rules. For example, we may want to formalize the following rule: Persons with at least one child and the male gender, are fathers. At the same time, we may be restricted in the information we are allowed to publish in our linked dataset: After fatherhood has been determined, any specific information about parent/child relationships must be removed from the internal store. The rule can be formalized as follows: import { logQuads } from '@triplyetl/etl/debug' import { Etl } from '@triplyetl/etl/generic' import { update } from '@triplyetl/etl/sparql' import { sdo } from '@triplyetl/vocabularies' const baseIri = 'https://triplydb.com/' export default async function (): Promise { const etl = new Etl() etl.use( update(` base <${baseIri}> prefix sdo: <${sdo.$namespace}> insert data { a sdo:Person; sdo:children ; sdo:gender sdo:Male. }`), update(` base <${baseIri}> prefix sdo: <${sdo.$namespace}> delete { $person sdo:children ?child. } insert { $person a . } where { $person a sdo:Person; sdo:children ?child; sdo:gender sdo:Male. }`), logQuads(), ) return etl }","title":"SPARQL Update"},{"location":"triply-etl/enrich/sparql/update/#sparql-update","text":"SPARQL is a powerful query language that can be used to modify and enrich linked data in the Internal Store. With SPARQL, you can generate new linked data based on existing linked data, thereby enhancing the contents of the store. The function for using SPARQL Update can be imported as follows: import { update } from '@triplyetl/etl/sparql'","title":"SPARQL Update"},{"location":"triply-etl/enrich/sparql/update/#insert-data","text":"Insert Data can be used to add linked data to the Internal Store. The following example adds one triple: import { logQuads } from '@triplyetl/etl/debug' import { Etl } from '@triplyetl/etl/generic' import { update } from '@triplyetl/etl/sparql' export default async function (): Promise { const etl = new Etl() etl.use( update(` base insert data { . }`), logQuads(), ) return etl } Debug function logQuads() prints the content of the internal store to standard output: base .","title":"Insert Data"},{"location":"triply-etl/enrich/sparql/update/#using-prefix-declarations","text":"Notice that the SPARQL Update function takes a plain string. Any typos you make in this string will only result in errors at runtime, when the query string is interpreted and executed. One of the more difficult things to get right in a SPARQL string are the prefix declarations. We can use the prefix object to insert the correct IRI prefixes. The following example asserts three triples, and uses the prefix object to insert the IRI prefix for Schema.org: import { logQuads } from '@triplyetl/etl/debug' import { Etl } from '@triplyetl/etl/generic' import { update } from '@triplyetl/etl/sparql' import { sdo } from '@triplyetl/vocabularies' export default async function (): Promise { const etl = new Etl() etl.use( update(` base prefix sdo: <${sdo.$namespace}> insert data { a sdo:Person; sdo:children ; sdo:gender sdo:Male. 
}`), logQuads(), ) return etl } This prints the following linked data to standard output: base prefix sdo: a sdo:Person; sdo:children ; sdo:gender sdo:Male.","title":"Using prefix declarations"},{"location":"triply-etl/enrich/sparql/update/#delete-data","text":"While there are not many uses cases for removing data from the internal store, this is an operation that is supported by the SPARQL Update standard. The following function call removes the parent/child relationship assertion that was added to the internal store earlier: update(` prefix sdo: <${sdo.$namespace}> delete data { sdo:children . }`), You can use the debug function logQuads() before and after this function call, to see the effects on the internal store.","title":"Delete Data"},{"location":"triply-etl/enrich/sparql/update/#delete-insert-where","text":"SPARQL Update can be used to conditionally add and/or remove linked data to/from the internal store. It uses the following keywords for this: where is the condition that must be met inside the internal store. Conditions can be specified in a generic way by using SPARQL variables. The bindings for these variables are shared with the other two components. delete is the pattern that is removed from the internal store. This requires that the where condition is satisfied in the internal store. Any bindings for variables that are shared between the where condition and the delete pattern are instantiated before deletion is performed. Deletion is performed before insertion. insert is the pattern that is added to the internal store. This requires that the where condition is satisfied in the internal store. Any bindings for variables that are shared between the where condition and the insert pattern are instantiated before insertion is performed. Insertion is performed after deletion. We can use this powerful combination of a where condition and a delete and insert follow-up to implement rules. For example, we may want to formalize the following rule: Persons with at least one child and the male gender, are fathers. At the same time, we may be restricted in the information we are allowed to publish in our linked dataset: After fatherhood has been determined, any specific information about parent/child relationships must be removed from the internal store. The rule can be formalized as follows: import { logQuads } from '@triplyetl/etl/debug' import { Etl } from '@triplyetl/etl/generic' import { update } from '@triplyetl/etl/sparql' import { sdo } from '@triplyetl/vocabularies' const baseIri = 'https://triplydb.com/' export default async function (): Promise { const etl = new Etl() etl.use( update(` base <${baseIri}> prefix sdo: <${sdo.$namespace}> insert data { a sdo:Person; sdo:children ; sdo:gender sdo:Male. }`), update(` base <${baseIri}> prefix sdo: <${sdo.$namespace}> delete { $person sdo:children ?child. } insert { $person a . } where { $person a sdo:Person; sdo:children ?child; sdo:gender sdo:Male. }`), logQuads(), ) return etl }","title":"Delete Insert Where"},{"location":"triply-etl/extract/","text":"On this page: Extract Next steps Extract \u00b6 The TriplyETL Extract step is the first step in any TriplyETL pipeline. It is indicated by the red arrow in the following diagram: graph LR sources -- 1. Extract --> record record -- 2. Transform --> record record -- 3. Assert --> ld ld -- 4. Enrich --> ld ld -- 5. Validate --> ld ld -- 6. Publish --> destinations linkStyle 0 stroke:red,stroke-width:3px; destinations[(\"D. Destinations\\n(TriplyDB)\")] ld[C. Internal Store] record[B. 
Record] sources[A. Data Sources] In the Extract step, one or more extractors are used to create a stream of records from a data source . The basic structure of every record is the same: it does not matter which extractor or which source is used. The following extractors are currently supported: CSV or Comma-Separated Values JSON or JavaScript Object Notation OAI-PMH or Open Archives Initiative Protocol for Metadata Harvesting Postgres for PostgreSQL Query & Postgres API Options RDF for Resource Description Framework Shapefile for ESRI Shapefiles TSV for Tab-Separated Values XLSX for Microsoft Excel XML for Extensible Markup Language Next steps \u00b6 The Extract step results in a stream of records that can be processed in the following steps: Step 2. Transform : cleans, combines, and extends data in the record. Step 3. Assert : uses data from the record to make linked data assertions in the internal store.","title":"Overview"},{"location":"triply-etl/extract/#extract","text":"The TriplyETL Extract step is the first step in any TriplyETL pipeline. It is indicated by the red arrow in the following diagram: graph LR sources -- 1. Extract --> record record -- 2. Transform --> record record -- 3. Assert --> ld ld -- 4. Enrich --> ld ld -- 5. Validate --> ld ld -- 6. Publish --> destinations linkStyle 0 stroke:red,stroke-width:3px; destinations[(\"D. Destinations\\n(TriplyDB)\")] ld[C. Internal Store] record[B. Record] sources[A. Data Sources] In the Extract step, one or more extractors are used to create a stream of records from a data source . The basic structure of every record is the same: it does not matter which extractor or which source is used. The following extractors are currently supported: CSV or Comma-Separated Values JSON or JavaScript Object Notation OAI-PMH or Open Archives Initiative Protocol for Metadata Harvesting Postgres for PostgreSQL Query & Postgres API Options RDF for Resource Description Framework Shapefile for ESRI Shapefiles TSV for Tab-Separated Values XLSX for Microsoft Excel XML for Extensible Markup Language","title":"Extract"},{"location":"triply-etl/extract/#next-steps","text":"The Extract step results in a stream of records that can be processed in the following steps: Step 2. Transform : cleans, combines, and extends data in the record. Step 3. Assert : uses data from the record to make linked data assertions in the internal store.","title":"Next steps"},{"location":"triply-etl/extract/csv/","text":"On this page: CSV extractor Basic usage Standards-compliance Encoding configuration Separator configuration CSV with tab separators is not TSV Record representation CSV extractor \u00b6 CSV or Comma-Separated Values (file name extension .csv ) is a popular format for storing tabular source data. TriplyETL has a dedicated fromCsv() extractor for this data format. Basic usage \u00b6 The CSV extractor is imported in the following way: import { fromCsv, Source } from '@triplyetl/etl/generic' The following code snippet extracts records from a local CSV file: fromCsv(Source.file('data.csv')), The following code snippet extracts records from an online CSV file that is hosted at the specified URL: fromCsv(Source.url('https://somewhere.com/data.csv')), The following code snippet extracts records from a TriplyDB Asset . The asset is stored in the dataset with name 'some-dataset' , under an account with name 'some-account' .
The name of the asset is 'example.csv' : fromCsv( Source.TriplyDb.asset( 'some-account', 'some-dataset', { name: 'example.csv' } ) ), Standards-compliance \u00b6 The fromCsv() extractor implements the official CSV standard: IETF RFC 4180 . Some CSV files do not follow the standard precisely. In order to process such CSV files, the default behavior of the extractor can be changed through an optional options parameter. See the CSV Parse for Node.js documentation for all the available options. Encoding configuration \u00b6 According to the official CSV standard, CSV sources are allowed to use any encoding. Since the CSV format does not allow the used encoding to be specified in the format itself, a non-standard encoding must always be configured manually. By default, TriplyETL assumes that CSV sources use the UTF-8 encoding. If another encoding is used, this must be explicitly specified by using the optional options parameter. The following snippet configures that the CSV source uses the ISO Latin-1 encoding: fromCsv( Source.TriplyDb.asset('my-dataset', { name: 'example.csv' }), { encoding: 'latin1' } ), The following encodings are currently supported: Value Encoding Standard Alternative values 'ascii' US-ASCII ANSI 'latin1' Latin-1 ISO-8859-1 binary 'utf8' UTF-8 Unicode 'utf16le' UTF-16 Little Endian Unicode 'ucs2' , 'ucs-2' , 'utf16-le' Read the CSV Parse for Node.js documentation for more information. Separator configuration \u00b6 Some CSV files only deviate in their use of a different separator character. For example, some CSV files use the semi-colon ( ; ) or the at-sign ( @ ) for this. The following snippet extracts records for a CSV file that uses the semi-colon ( ; ) as the separator character: fromCsv(Source.file('example.csv'), { separator: ';' }), CSV with tab separators is not TSV \u00b6 Notice that the popular Tab-Separate Values (TSV) format is not the same as the standardized CSV format with a tab separator character. If you want to process standards-conforming TSV source data, use the fromTsv() extractor instead. Record representation \u00b6 TriplyETL treats every row in a CSV source as one record . The columns are emitted as keys and the cells are emitted as values. All values are of type string . Empty cells (i.e. those containing the empty string) are treated as denoting a null value and are therefore excluded from the record. Any trailing whitespace that appears in headers or cells is removed from the keys and values in the record. For example, the following table: ID Name Age 1 Doe, John 32 2 D., Jane can be expressed by the following CSV snippet: ID,Name,Age 1,\"Doe, John\",32 2,\"D., Jane \", which is emitted as the following two TriplyETL records: { \"ID\": \"1\", \"Name\": \"Doe, John\", \"Age\": \"32\" } { \"ID\": \"2\", \"Name\": \"D., Jane\" } Notice the following details: - All values have type string , including \"ID\" and \"Age\" . The value for field \"Age\" should probably be considered numeric, but the CSV format cannot express this. A TriplyETL transformation can be used to cast string values to numeric values. - The trailing space in \"D., Jane \" is omitted from the second record, since training whitespace is removed from all keys and values. 
- The \"Age\" key is missing from the second record, since the corresponding CSV cell contains the empty string, which is considered to denote an empty value.","title":"CSV"},{"location":"triply-etl/extract/csv/#csv-extractor","text":"CSV or Comma Separated Values (file name extension .csv ) is a popular format for storing tabular source data. TriplyETL has a dedicated fromCsv() extractor for this data format.","title":"CSV extractor"},{"location":"triply-etl/extract/csv/#basic-usage","text":"The CSV extractor is imported in the following way: import { fromCsv, Source } from '@triplyetl/etl/generic' The following code snippet extracts records from a local CSV file: fromCsv(Source.file('data.csv')), The following code snippet extracts records from an online CSV file, that is hosted at the specified URL: fromCsv(Source.url('https://somewhere.com/data.csv')), The following code snippet extracts records from a TriplyDB Asset . The asset is store in the data with name 'some-data' , under an account with name 'some-account' . The name of the asset is 'example.csv' : fromCsv( Source.TriplyDb.asset( 'some-account', 'some-dataset', { name: 'example.csv' } ) ),","title":"Basic usage"},{"location":"triply-etl/extract/csv/#standards-compliance","text":"The fromCsv() extractor implements the official CSV standard: IETF RFC 4180 . Some CSV files do not follow the standard precisely. In order to process such CSV files, the default behavior of the extractor can be changed through an optional options parameter. See the CSV Parse for Node.js documentation for all the available options.","title":"Standards-compliance"},{"location":"triply-etl/extract/csv/#encoding-configuration","text":"According to the official CSV standard, CSV sources are allowed to use any encoding. Since the CSV format does not allow the used encoding to be specified in the format itself, a non-standard encoding must always be configured manually. By default, TriplyETL assumes that CSV sources use the UTF-8 encoding. If another encoding is used, this must be explicitly specified by using the optional options parameter. The following snippet configures that the CSV source uses the ISO Latin-1 encoding: fromCsv( Source.TriplyDb.asset('my-dataset', { name: 'example.csv' }), { encoding: 'latin1' } ), The following encodings are currently supported: Value Encoding Standard Alternative values 'ascii' US-ASCII ANSI 'latin1' Latin-1 ISO-8859-1 binary 'utf8' UTF-8 Unicode 'utf16le' UTF-16 Little Endian Unicode 'ucs2' , 'ucs-2' , 'utf16-le' Read the CSV Parse for Node.js documentation for more information.","title":"Encoding configuration"},{"location":"triply-etl/extract/csv/#separator-configuration","text":"Some CSV files only deviate in their use of a different separator character. For example, some CSV files use the semi-colon ( ; ) or the at-sign ( @ ) for this. The following snippet extracts records for a CSV file that uses the semi-colon ( ; ) as the separator character: fromCsv(Source.file('example.csv'), { separator: ';' }),","title":"Separator configuration"},{"location":"triply-etl/extract/csv/#csv-with-tab-separators-is-not-tsv","text":"Notice that the popular Tab-Separate Values (TSV) format is not the same as the standardized CSV format with a tab separator character. 
If you want to process standards-conforming TSV source data, use the fromTsv() extractor instead.","title":"CSV with tab separators is not TSV"},{"location":"triply-etl/extract/csv/#record-representation","text":"TriplyETL treats every row in a CSV source as one record . The columns are emitted as keys and the cells are emitted as values. All values are of type string . Empty cells (i.e. those containing the empty string) are treated as denoting a null value and are therefore excluded from the record. Any trailing whitespace that appears in headers or cells is removed from the keys and values in the record. For example, the following table: ID Name Age 1 Doe, John 32 2 D., Jane can be expressed by the following CSV snippet: ID,Name,Age 1,\"Doe, John\",32 2,\"D., Jane \", which is emitted as the following two TriplyETL records: { \"ID\": \"1\", \"Name\": \"Doe, John\", \"Age\": \"32\" } { \"ID\": \"2\", \"Name\": \"D., Jane\" } Notice the following details: - All values have type string , including \"ID\" and \"Age\" . The value for field \"Age\" should probably be considered numeric, but the CSV format cannot express this. A TriplyETL transformation can be used to cast string values to numeric values. - The trailing space in \"D., Jane \" is omitted from the second record, since training whitespace is removed from all keys and values. - The \"Age\" key is missing from the second record, since the corresponding CSV cell contains the empty string, which is considered to denote an empty value.","title":"Record representation"},{"location":"triply-etl/extract/json/","text":"On this page: JSON extractor Basic usage Path selectors Nested keys Dealing with dots in keys Index-based list access JSON extractor \u00b6 JSON or JavaScript Object Notation (file name extension .json ) is a popular open standard for interchanging tree-shaped data. TriplyETL has a dedicated fromJson() extractor for this format. Basic usage \u00b6 The JSON extractor is imported in the following way: import { fromJson, Source } from '@triplyetl/etl/generic' The following code snippet extracts records from a JSON source that is stored as a TriplyDB asset : fromJson( Source.TriplyDb.asset( 'some-account', 'some-dataset', { name: 'example.json.gz' } ) ), The following example uses an in-line specified JSON source: fromJson([{ a: 'a', b: 'b', c: 'c' }]), TriplyETL supports the IETF RFC 8259 standard for JSON. Path selectors \u00b6 If the JSON data source is large, it may be necessary to stream through subtrees. The subtrees that should used, can be specified through the selectors option. For example, the following snippet streams to each person record individually: fromJson( {data: {persons: [{name: 'John'}, {name: 'Mary'}]}}, {selectors: 'data.persons'} ), Notice that the dot is used to specify paths, i.e. sequences of keys. It is also possible to specify multiple selectors by using an array of strings. Nested keys \u00b6 Since JSON is a tree-shaped format, it is able to store values in a nested structure. This requires a sequence or 'path' of keys to be specified. We use the following example data: { \"metadata\": { \"title\": { \"name\": \"Data about countries.\" } }, \"data\": { \"countries\": [ { \"country.id\": \"nl\", \"name\": \"The Netherlands\" }, { \"country.id\": \"de\", \"name\": \"Germany\" } ] } } Paths are specified as dot-separated sequences of keys, starting at the top-level and ending at the required value. 
For the JSON example above, TriplyETL can access the \"name\" key inside the \"title\" key, which itself is nested inside the \"metadata\" key. This path is expressed in [1]. Notice that the path expressed in [1] is different from the path expressed in [2], which also accesses the \"name\" key, but nested inside the \"countries\" and then \"data\" keys. (The use of the [0] index is explained in the next section.) [1] metadata.title.name [2] data.countries[0].name Path expressions can be used as string keys in many places in TriplyETL. For example, we can assert the title of a dataset in the following way: triple('_dataset', dct.title, 'metadata.title.name'), This asserts the following linked data: dataset:my-dataset dct:title 'Data about countries.'. Dealing with dots in keys \u00b6 In the previous section we saw that dots are used to separate keys in paths. However, sometimes a dot can occur as a regular character inside a key. In such cases, we need to apply additional escaping of the key name to avoid naming conflicts. The example data from the previous section contains the following key: \"country.id\" Notice that the dot is here part of the key name. We can refer to these keys as follows: triple('_country', dct.id, 'data.countries[0].[\"country.id\"]'), Notice the use of additional escaping: [\"...\"] Index-based list access \u00b6 Tree-shaped data formats often allow multiple values to be specified in an ordered list. Examples of this are arrays in JSON and XML elements with the same tag that are directly nested under the same parent element. TriplyETL is able to access specific elements from lists based on their index or position. Following the standard practice in Computer Science, TriplyETL refers to the first element in the list as having index 0. The second element has index 1, etc. For the above example record, we can assert the name of the first country as follows: triple( iri(prefix.id, 'data.countries[0].[\"country.id\"]'), rdfs.label, 'data.countries[0].name' ), This results in the following linked data: id:nl rdfs:label 'The Netherlands'. We can also assert the name of the second country. Notice that only the index is different ( 1 instead of 0 ): triple( iri(prefix.id, 'data.countries[1].[\"country.id\"]'), rdfs.label, 'data.countries[1].name' ), This results in the following linked data: id:de rdfs:label 'Germany'.","title":"JSON"},{"location":"triply-etl/extract/json/#json-extractor","text":"JSON or JavaScript Object Notation (file name extension .json ) is a popular open standard for interchanging tree-shaped data. TriplyETL has a dedicated fromJson() extractor for this format.","title":"JSON extractor"},{"location":"triply-etl/extract/json/#basic-usage","text":"The JSON extractor is imported in the following way: import { fromJson, Source } from '@triplyetl/etl/generic' The following code snippet extracts records from a JSON source that is stored as a TriplyDB asset : fromJson( Source.TriplyDb.asset( 'some-account', 'some-dataset', { name: 'example.json.gz' } ) ), The following example uses an in-line specified JSON source: fromJson([{ a: 'a', b: 'b', c: 'c' }]), TriplyETL supports the IETF RFC 8259 standard for JSON.","title":"Basic usage"},{"location":"triply-etl/extract/json/#path-selectors","text":"If the JSON data source is large, it may be necessary to stream through subtrees. The subtrees that should used, can be specified through the selectors option. 
For example, the following snippet streams to each person record individually: fromJson( {data: {persons: [{name: 'John'}, {name: 'Mary'}]}}, {selectors: 'data.persons'} ), Notice that the dot is used to specify paths, i.e. sequences of keys. It is also possible to specify multiple selectors by using an array of strings.","title":"Path selectors"},{"location":"triply-etl/extract/json/#nested-keys","text":"Since JSON is a tree-shaped format, it is able to store values in a nested structure. This requires a sequence or 'path' of keys to be specified. We use the following example data: { \"metadata\": { \"title\": { \"name\": \"Data about countries.\" } }, \"data\": { \"countries\": [ { \"country.id\": \"nl\", \"name\": \"The Netherlands\" }, { \"country.id\": \"de\", \"name\": \"Germany\" } ] } } Paths are specified as dot-separated sequences of keys, starting at the top-level and ending at the required value. For the JSON example above, TriplyETL can access the \"name\" key inside the \"title\" key, which itself is nested inside the \"metadata\" key. This path is expressed in [1]. Notice that the path expressed in [1] is different from the path expressed in [2], which also accesses the \"name\" key, but nested inside the \"countries\" and then \"data\" keys. (The use of the [0] index is explained in the next section.) [1] metadata.title.name [2] data.countries[0].name Path expressions can be used as string keys in many places in TriplyETL. For example, we can assert the title of a dataset in the following way: triple('_dataset', dct.title, 'metadata.title.name'), This asserts the following linked data: dataset:my-dataset dct:title 'Data about countries.'.","title":"Nested keys"},{"location":"triply-etl/extract/json/#dealing-with-dots-in-keys","text":"In the previous section we saw that dots are used to separate keys in paths. However, sometimes a dot can occur as a regular character inside a key. In such cases, we need to apply additional escaping of the key name to avoid naming conflicts. The example data from the previous section contains the following key: \"country.id\" Notice that the dot is here part of the key name. We can refer to these keys as follows: triple('_country', dct.id, 'data.countries[0].[\"country.id\"]'), Notice the use of additional escaping: [\"...\"]","title":"Dealing with dots in keys"},{"location":"triply-etl/extract/json/#index-based-list-access","text":"Tree-shaped data formats often allow multiple values to be specified in an ordered list. Examples of this are arrays in JSON and XML elements with the same tag that are directly nested under the same parent element. TriplyETL is able to access specific elements from lists based on their index or position. Following the standard practice in Computer Science, TriplyETL refers to the first element in the list as having index 0. The second element has index 1, etc. For the above example record, we can assert the name of the first country as follows: triple( iri(prefix.id, 'data.countries[0].[\"country.id\"]'), rdfs.label, 'data.countries[0].name' ), This results in the following linked data: id:nl rdfs:label 'The Netherlands'. We can also assert the name of the second country. 
Notice that only the index is different ( 1 instead of 0 ): triple( iri(prefix.id, 'data.countries[1].[\"country.id\"]'), rdfs.label, 'data.countries[1].name' ), This results in the following linked data: id:de rdfs:label 'Germany'.","title":"Index-based list access"},{"location":"triply-etl/extract/oai-pmh/","text":"On this page: OAI-PMH extractor Basic usage Standards-compliance Verb 'ListIdentifiers' Verb 'ListRecords' OAI-PMH extractor \u00b6 In the GLAM (Galleries, Libraries, Archives, Museums) domain, the Open Archives Initiative (OAI) Protocol for Metadata Harvesting (PMH) is a popular protocol and format for publishing data collections. TriplyETL has a dedicated fromOai() extractor to tap into these data collections. The fromOai() extractor ensures a continuous stream of data records. Under the hood, the extractor uses resumption tokens to iterate over large collections. Basic usage \u00b6 The OAI-PMH extractor is imported in the following way: import { fromOai, Source } from '@triplyetl/etl/generic' An OAI-PMH endpoint can be configured by specifying its URL (parameter url ). Since one OAI-PMH endpoint typically publishes multiple datasets, it is also common to specify the set parameter. The following code snippet connects to an example dataset that is published in an OAI-PMH endpoint: fromOai({ set: 'some-dataset', url: 'https://somewhere.com/webapioai/oai.ashx' }), Standards-compliance \u00b6 TriplyETL supports the official OAI-PMH standard. The OAI-PMH standard defines 6 'verbs'. These are different sub-APIs that together compose the OAI-PMH API. The fromOai() extractor currently supports the following two verbs: ListIdentifiers and ListRecords . Verb 'ListIdentifiers' \u00b6 This 'verb' or sub-API streams through the headers of all records. It does not return the actual (body) content of each record (see ListRecords ). This verb can be used to look for header properties like set membership, datestamp, and deletion status. The following code snippet streams through the headers of a public OAI-PMH endpoint: fromOai({ metadataPrefix: 'marcxml', set: 'iish.evergreen.biblio', url: 'https://api.socialhistoryservices.org/solr/all/oai', verb: 'ListIdentifiers' }), logRecord(), Verb 'ListRecords' \u00b6 This 'verb' or sub-API streams through all records and retrieves them in full. This API is used to harvest records. The following code snippet streams through the records of a public OAI-PMH endpoint: fromOai({ metadataPrefix: 'marcxml', set: 'iish.evergreen.biblio', url: 'https://api.socialhistoryservices.org/solr/all/oai', verb: 'ListRecords' }), logRecord(),","title":"OAI-PMH"},{"location":"triply-etl/extract/oai-pmh/#oai-pmh-extractor","text":"In the GLAM (Galleries, Libraries, Archives, Museums) domain, the Open Archives Initiative (OAI) Protocol for Metadata Harvesting (PMH) is a popular protocol and format for publishing data collections. TriplyETL has a dedicated fromOai() extractor to tap into these data collections. The fromOai() extractor ensures a continuous stream of data records. Under the hood, the extractor uses resumption tokens to iterate over large collections.","title":"OAI-PMH extractor"},{"location":"triply-etl/extract/oai-pmh/#basic-usage","text":"The OAI-PMH extractor is imported in the following way: import { fromOai, Source } from '@triplyetl/etl/generic' An OAI-PMH endpoint can be configured by specifying its URL (parameter url ). Since one OAI-PMH endpoint typically publishes multiple datasets, it is also common to specify the set parameter. 
The following code snippet connects to an example dataset that is published in an OAI-PMH endpoint: fromOai({ set: 'some-dataset', url: 'https://somewhere.com/webapioai/oai.ashx' }),","title":"Basic usage"},{"location":"triply-etl/extract/oai-pmh/#standards-compliance","text":"TriplyETL supports the official OAI-PMH standard. The OAI-PMH standard defines 6 'verbs'. These are different sub-APIs that together compose the OAI-PMH API. The fromOai() extractor currently supports the following two verbs: ListIdentifiers and ListRecords .","title":"Standards-compliance"},{"location":"triply-etl/extract/oai-pmh/#verb-listidentifiers","text":"This 'verb' or sub-API streams through the headers of all records. It does not return the actual (body) content of each record (see ListRecords ). This verb can be used to look for header properties like set membership, datestamp, and deletion status. The following code snippet streams through the headers of a public OAI-PMH endpoint: fromOai({ metadataPrefix: 'marcxml', set: 'iish.evergreen.biblio', url: 'https://api.socialhistoryservices.org/solr/all/oai', verb: 'ListIdentifiers' }), logRecord(),","title":"Verb 'ListIdentifiers'"},{"location":"triply-etl/extract/oai-pmh/#verb-listrecords","text":"This 'verb' or sub-API streams through all records and retrieves them in full. This API is used to harvest records. The following code snippet streams through the records of a public OAI-PMH endpoint: fromOai({ metadataPrefix: 'marcxml', set: 'iish.evergreen.biblio', url: 'https://api.socialhistoryservices.org/solr/all/oai', verb: 'ListRecords' }), logRecord(),","title":"Verb 'ListRecords'"},{"location":"triply-etl/extract/postgres/","text":"On this page: Postgres extractor Basic usage Connector configuration Postgres extractor \u00b6 PostgreSQL or Postgres is an open-source relational database system. Postgres supports both SQL (relational) and JSON (non-relational) querying. TriplyETL has a dedicated fromPostgres() extractor to retrieve data from a Postgres database. Basic usage \u00b6 The Postgres extractor is imported in the following way: import { fromPostgres, Source } from '@triplyetl/etl/generic' The following code snippet extracts records from a public database URL: fromPostgres( 'select * from rnc_database', { url: 'postgres://reader:NWDMCE5xdipIjRrp@hh-pgsql-public.ebi.ac.uk:5432/pfmegrnargs' } ), Connector configuration \u00b6 Alternatively, a Postgres dataset can be accessed via connector configuration. The following code snippet accesses the same public database, but uses connector configuration to do so: fromPostgres( 'select * from rnc_database', { host: 'hh-pgsql-public.ebi.ac.uk', port: 5432, database: 'pfmegrnargs', user: 'reader', password: 'NWDMCE5xdipIjRrp', } ),","title":"Postgres"},{"location":"triply-etl/extract/postgres/#postgres-extractor","text":"PostgreSQL or Postgres is an open-source relational database system. Postgres supports both SQL (relational) and JSON (non-relational) querying. 
TriplyETL has a dedicated fromPostgres() extractor to retrieve data from a Postgres database.","title":"Postgres extractor"},{"location":"triply-etl/extract/postgres/#basic-usage","text":"The Postgres extractor is imported in the following way: import { fromPostgres, Source } from '@triplyetl/etl/generic' The following code snippet extracts records from a public database URL: fromPostgres( 'select * from rnc_database', { url: 'postgres://reader:NWDMCE5xdipIjRrp@hh-pgsql-public.ebi.ac.uk:5432/pfmegrnargs' } ),","title":"Basic usage"},{"location":"triply-etl/extract/postgres/#connector-configuration","text":"Alternatively, a Postgres dataset can be accessed via connector configuration. The following code snippet accesses the same public database, but uses connector configuration to do so: fromPostgres( 'select * from rnc_database', { host: 'hh-pgsql-public.ebi.ac.uk', port: 5432, database: 'pfmegrnargs', user: 'reader', password: 'NWDMCE5xdipIjRrp', } ),","title":"Connector configuration"},{"location":"triply-etl/extract/rdf/","text":"On this page: RDF loader Basic usage Loading RDF from an HTML page RDF loader \u00b6 RDF or Resource Description Framework (file name extensions .nq , .nt , .jsonld , .rdf , .trig , .turtle ) is the standardized format for linked data. We do not need to extract records from an RDF data source, but can instead load its contents directly into the internal store . Basic usage \u00b6 The RDF loader is imported in the following way: import { loadRdf, Source } from '@triplyetl/etl/generic' The following code snippet loads RDF from the specified TriplyDB Dataset into the internal store: loadRdf(Source.TriplyDb.rdf('my-account', 'my-dataset')), The following code snippet loads RDF from a SPARQL Construct query that is stored as a TriplyDB Query : loadRdf(Source.TriplyDb.query('Triply', 'network-query')), Loading RDF from an HTML page \u00b6 With the loadRdf() extractor, it is also possible to extract data from web pages (HTML) that contain Schema.org markup in JSON-LD. This is possible because most websites contain linked data annotations that use Schema.org . Such linked data is enclosed in a script tag of type application/ld+json. Schema markup is how Google can serve up rich results (also called rich snippets and rich cards). 
The schema is included in HTML in the following way: The Script Type : What format your structured data will take (JSON-LD) The Context : Where the language you\u2019re using comes from (schema.org) The Type : What kind of thing is the search engine looking at (Article) The Property : What kind of quality will you be describing when it comes to this type (url) The Value : What you\u2019re actually telling the search engines about this property (the URL of the article) Example taken from Wikipedia: The Wikipedia page of the first programmer in history (https://en.wikipedia.org/wiki/Ada_Lovelace) contains the following linked data: { \"@context\": \"https://schema.org\", \"@type\": \"Article\", \"name\": \"Ada Lovelace\", \"url\": \"https://en.wikipedia.org/wiki/Ada_Lovelace\", \"sameAs\": \"http://www.wikidata.org/entity/Q7259\", \"mainEntity\": \"http://www.wikidata.org/entity/Q7259\", \"author\": { \"@type\": \"Organization\", \"name\": \"Contributors to Wikimedia projects\" }, \"publisher\": { \"@type\": \"Organization\", \"name\": \"Wikimedia Foundation, Inc.\", \"logo\": { \"@type\": \"ImageObject\", \"url\": \"https://www.wikimedia.org/static/images/wmf-hor-googpub.png\" } }, \"datePublished\": \"2001-05-20T14:57:05Z\", \"dateModified\": \"2023-03-17T21:28:23Z\", \"image\": \"https://upload.wikimedia.org/wikipedia/commons/0/0b/Ada_Byron_daguerreotype_by_Antoine_Claudet_1843_or_1850.jpg\", \"headline\": \"1815-1852 British mathematician, considered the first computer programmer\" } This data can be loaded with the following code snippet: loadRdf( Source.url('https://en.wikipedia.org/wiki/Ada_Lovelace'), { contentType: 'text/html' } ),","title":"RDF"},{"location":"triply-etl/extract/rdf/#rdf-loader","text":"RDF or Resource Description Framework (file name extensions .nq , .nt , .jsonld , .rdf , .trig , .turtle ) is the standardized format for linked data. We do not need to extract records from an RDF data source, but can instead load its contents directly into the internal store .","title":"RDF loader"},{"location":"triply-etl/extract/rdf/#basic-usage","text":"The RDF loader is imported in the following way: import { loadRdf, Source } from '@triplyetl/etl/generic' The following code snippet loads RDF from the specified TriplyDB Dataset into the internal store: loadRdf(Source.TriplyDb.rdf('my-account', 'my-dataset')), The following code snippet loads RDF from a SPARQL Construct query that is stored as a TriplyDB Query : loadRdf(Source.TriplyDb.query('Triply', 'network-query')),","title":"Basic usage"},{"location":"triply-etl/extract/rdf/#loading-rdf-from-an-html-page","text":"With loadRdf() extractor, it is also possible to extract data from web pages / HTML, which contain Schema in JSON-LD. This is possible because most websites contain linked data annotations that use Schema.org . Such linked data is enclosed in a tag: Schema markup is how Google can serve up rich results (also called rich snippets and rich cards). 
The schema is included in HTML in the following way: The Script Type : What format your structured data will take (JSON-LD) The Context : Where the language you\u2019re using comes from (schema.org) The Type : What kind of thing is the search engine looking at (Article) The Property : What kind of quality will you be describing when it comes to this type (url) The Value : What you\u2019re actually telling the search engines about this property (the URL of the article) Example taken from Wikipedia: The Wikipedia page of the first programmer in history (https://en.wikipedia.org/wiki/Ada_Lovelace) contains the following linked data: { \"@context\": \"https://schema.org\", \"@type\": \"Article\", \"name\": \"Ada Lovelace\", \"url\": \"https://en.wikipedia.org/wiki/Ada_Lovelace\", \"sameAs\": \"http://www.wikidata.org/entity/Q7259\", \"mainEntity\": \"http://www.wikidata.org/entity/Q7259\", \"author\": { \"@type\": \"Organization\", \"name\": \"Contributors to Wikimedia projects\" }, \"publisher\": { \"@type\": \"Organization\", \"name\": \"Wikimedia Foundation, Inc.\", \"logo\": { \"@type\": \"ImageObject\", \"url\": \"https://www.wikimedia.org/static/images/wmf-hor-googpub.png\" } }, \"datePublished\": \"2001-05-20T14:57:05Z\", \"dateModified\": \"2023-03-17T21:28:23Z\", \"image\": \"https://upload.wikimedia.org/wikipedia/commons/0/0b/Ada_Byron_daguerreotype_by_Antoine_Claudet_1843_or_1850.jpg\", \"headline\": \"1815-1852 British mathematician, considered the first computer programmer\" } This data can be loaded with the following code snippet: loadRdf( Source.url('https://en.wikipedia.org/wiki/Ada_Lovelace'), { contentType: 'text/html' } ),","title":"Loading RDF from an HTML page"},{"location":"triply-etl/extract/shapefile/","text":"On this page: Shapefile extractor Basic usage Record representation Shapefile extractor \u00b6 The ESRI Shapefile format was developed by Esri (Environmental Systems Research Institute) for interoperability between Geographic Information Systems (GIS). An ESRI Shapefile is a ZIP with six files in it (file name extension .shapefile.zip ). Currently only one of the file in a Shapefile ZIP file is supported: the .shp file. Basic usage \u00b6 The Shapefile extractor is imported in the following way: import { fromShapefile, Source } from '@triplyetl/etl/generic' The following code snippet extracts records from a local Shapefile: fromShapefile(Source.file('example.shp')) The following code snippet extracts records from a Shapefile that is stored as a TriplyDB Asset: fromShapefile( Source.TriplyDb.asset( 'some-account', 'some-dataset', { name: 'example.shp' } ) ), Record representation \u00b6 The following example record is obtained from a file called nl_1km.shp that is published by the European Environment Agency : { '$recordId': 1, '$environment': 'Development', '$fileName': 'nl_1km.shp', type: 'Feature', properties: { CELLCODE: '1kmE3793N3217', EOFORIGIN: 3793000, NOFORIGIN: 3217000 }, geometry: { type: 'Polygon', coordinates: [ [ [ 3793000, 3217000 ], [ 3793000, 3218000 ], [ 3794000, 3218000 ], [ 3794000, 3217000 ], [ 3793000, 3217000 ] ] ] } }","title":"Shapefile"},{"location":"triply-etl/extract/shapefile/#shapefile-extractor","text":"The ESRI Shapefile format was developed by Esri (Environmental Systems Research Institute) for interoperability between Geographic Information Systems (GIS). An ESRI Shapefile is a ZIP with six files in it (file name extension .shapefile.zip ). 
Currently only one of the files in a Shapefile ZIP is supported: the .shp file.","title":"Shapefile extractor"},{"location":"triply-etl/extract/shapefile/#basic-usage","text":"The Shapefile extractor is imported in the following way: import { fromShapefile, Source } from '@triplyetl/etl/generic' The following code snippet extracts records from a local Shapefile: fromShapefile(Source.file('example.shp')) The following code snippet extracts records from a Shapefile that is stored as a TriplyDB Asset: fromShapefile( Source.TriplyDb.asset( 'some-account', 'some-dataset', { name: 'example.shp' } ) ),","title":"Basic usage"},{"location":"triply-etl/extract/shapefile/#record-representation","text":"The following example record is obtained from a file called nl_1km.shp that is published by the European Environment Agency : { '$recordId': 1, '$environment': 'Development', '$fileName': 'nl_1km.shp', type: 'Feature', properties: { CELLCODE: '1kmE3793N3217', EOFORIGIN: 3793000, NOFORIGIN: 3217000 }, geometry: { type: 'Polygon', coordinates: [ [ [ 3793000, 3217000 ], [ 3793000, 3218000 ], [ 3794000, 3218000 ], [ 3794000, 3217000 ], [ 3793000, 3217000 ] ] ] } }","title":"Record representation"},{"location":"triply-etl/extract/tsv/","text":"On this page: TSV extractor Basic usage Extractor for TSV (Tab-Separated Values) Record representation TSV extractor \u00b6 TSV or Tab-Separated Values (file name extension .tsv ) is a popular format for storing tabular source data. TriplyETL has a dedicated fromTsv() extractor for this data format. Basic usage \u00b6 The TSV extractor is imported in the following way: import { fromTsv, Source } from '@triplyetl/etl/generic' Extractor for TSV (Tab-Separated Values) \u00b6 TSV or Tab-Separated Values (file name extension .tsv ) is a popular format for tabular source data. TriplyETL has a fromTsv() extractor to support this format. The following code snippet extracts records from a TSV file that is stored as a TriplyDB Asset : fromTsv( Source.TriplyDb.asset( 'some-account', 'some-dataset', { name: 'example.tsv.gz' } ) ), TriplyETL supports the IANA standard definition of the TSV format. Record representation \u00b6 TriplyETL treats every row in a TSV source as one record. The columns are emitted as keys and the cells are emitted as values. All values are of type string . Cells that contain the empty string are treated as denoting an empty value and are excluded from the record. Any trailing whitespace that appears in headers or cells is removed from the keys or values in the record. For example, the following table: ID Name Age 1 Doe, John 32 2 D., Jane can be expressed by the following TSV snippet: ID Name Age 1 Doe, John 32 2 D., Jane which is emitted as the following two TriplyETL records: { \"ID\": \"1\", \"Name\": \"Doe, John\", \"Age\": \"32\" } { \"ID\": \"2\", \"Name\": \"D., Jane\" } Notice the following details: All values have type string , including \"ID\" and \"Age\" . The value for field \"Age\" should probably be considered numeric, but the TSV format cannot express this. A TriplyETL transformation can be used to cast string values to numeric values. The trailing space in \"D., Jane \" is omitted from the second record, since trailing whitespace is removed from all keys and values. 
The \"Age\" key is missing from the second record, since the corresponding TSV cell contains the empty string, which is considered to denote an empty value.","title":"TSV"},{"location":"triply-etl/extract/tsv/#csv-extractor","text":"CSV or Comma Separated Values (file name extension .csv ) is a popular format for storing tabular source data. TriplyETL has a dedicated fromCsv() extractor for this data format.","title":"CSV extractor"},{"location":"triply-etl/extract/tsv/#basic-usage","text":"The TSV extractor is imported in the following way: import { fromTsv, Source } from '@triplyetl/etl/generic'","title":"Basic usage"},{"location":"triply-etl/extract/tsv/#extractor-for-tsv-tab-separated-values","text":"TSV or Tab-Separated Values (file name extension .tsv ) is a popular format for tabular source data. TriplyETL has a fromTsv() extractor to support this format. The following code snippet extracts records for TSV file that is stored as a TriplyDB Asset : fromTsv( Source.TriplyDb.asset( 'some-account', 'some-dataset', { name: 'example.tsv.gz' } ) ), TriplyETL supports the IANA standard definition of the TSV format.","title":"Extractor for TSV (Tab-Separated Values)"},{"location":"triply-etl/extract/tsv/#record-representation","text":"TriplyETL treats every row in a TSV source as one record. The columns are emitted as keys and the cells are emitted as values. All values are of type string . Cells that contain the empty string are treated as denoting an empty value and are excluded from the record. Any trailing whitespace that appears in headers or cells is removed from the keys or values in the record. For example, the following table: ID Name Age 1 Doe, John 32 2 D., Jane can be expressed by the following TSV snippet: ID Name Age 1 Doe, John 32 2 D., Jane which is emitted as the following two TriplyETL records: { \"ID\": \"1\", \"Name\": \"Doe, John\", \"Age\": \"32\" } { \"ID\": \"2\", \"Name\": \"D., Jane\" } Notice the following details: All values have type string , including \"ID\" and \"Age\" . The value for field \"Age\" should probably be considered numeric, but the TSV format cannot express this. A TriplyETL transformation can be used to cast string values to numeric values. The trailing space in \"D., Jane \" is omitted from the second record, since training whitespace is removed from all keys and values. The \"Age\" key is missing from the second record, since the corresponding TSV cell contains the empty string, which is considered to denote an empty value.","title":"Record representation"},{"location":"triply-etl/extract/xlsx/","text":"On this page: XLSX extractor (Microsoft Excel) Basic usage Multiple sheets Record representation Special key '$sheetName' XLSX extractor (Microsoft Excel) \u00b6 XLSX or Office Open XML Workbook for Microsoft Excel (file name extension .xlsx ) is a popular format for storing tabular source data. This is the standard file format for Microsoft Excel. TriplyETL has a dedicated fromXlsx() extractor for such sources. Basic usage \u00b6 The XLSX extractor is imported in the following way: import { fromXlsx, Source } from '@triplyetl/etl/generic' The following code snippet shows how a TriplyDB assets is used to process records from an XLSX source: fromXlsx( Source.TriplyDb.asset( 'some-account', 'some-dataset', { name: 'example.xlsx' } ) ), The fromXlsx() extractor emits one record per row in the source file. Multiple sheets \u00b6 It is common for XLSX files to have multiple sheets. 
By default the fromXlsx() extractor enumerates all rows from all sheets as records. If only some sheets should be used, this can be specified as a configuration option. The following code snippet only emits records/rows from the 'people' and 'projects' sheets in the XLSX source file 'example.xlsx' . Rows from other sheets in the same XLSX file are not emitted: fromXlsx(Source.file('example.xlsx'), { sheetNames: ['people', 'projects'] }), Record representation \u00b6 TriplyETL treats every row in an XLSX sheet as one record . The columns are emitted as keys and the cells are emitted as values. Unlike other tabular formats like CSV and TSV , values in XLSX can have different types. For example, the following table: ID Name Age 1 Doe, John 32 2 D., Jane can be emitted as the following two TriplyETL records: { \"$recordId\": 1, \"$environment\": \"Development\", \"$sheetName\": \"Sheet1\", \"$fileName\": \"static/Untitled 1.xlsx\", \"ID\": \"1\", \"Name\": \"Doe, John\", \"Age\": 32 } { \"$recordId\": 2, \"$environment\": \"Development\", \"$sheetName\": \"Sheet1\", \"$fileName\": \"static/Untitled 1.xlsx\", \"ID\": \"2\", \"Name\": \"D., Jane\", } Notice the following: - The value for the \"Age\" key is a number. - The special keys $recordId , $environment , and $fileName are documented in the section on Special Keys . - The special key $sheetName is unique to the fromXlsx() extractor and is documented in the next subsection. Special key '$sheetName' \u00b6 For every record emitted by the fromXlsx() extractor, the $sheetName special key contains the name of the Excel sheet from which that record originates. The presence of the sheet name allows the TriplyETL configuration to be adjusted for different sheets. For example, an Excel spreadsheet may contain a 'companies' sheet and a 'persons' sheet. The name of the sheet may be used to determine which class should be asserted. The following snippet uses transformation translateAll() to map sheet names to class IRIs: fromXlsx(Source.file('example.xlsx')), translateAll({ content: '$sheetName', table: { 'companies': sdo.Organization, 'persons': sdo.Person, }, key: '_class', }), triple(iri(prefix.id, '$recordId'), a, '_class'),","title":"XLSX"},{"location":"triply-etl/extract/xlsx/#xlsx-extractor-microsoft-excel","text":"XLSX or Office Open XML Workbook for Microsoft Excel (file name extension .xlsx ) is a popular format for storing tabular source data. This is the standard file format for Microsoft Excel. TriplyETL has a dedicated fromXlsx() extractor for such sources.","title":"XLSX extractor (Microsoft Excel)"},{"location":"triply-etl/extract/xlsx/#basic-usage","text":"The XLSX extractor is imported in the following way: import { fromXlsx, Source } from '@triplyetl/etl/generic' The following code snippet shows how a TriplyDB asset is used to process records from an XLSX source: fromXlsx( Source.TriplyDb.asset( 'some-account', 'some-dataset', { name: 'example.xlsx' } ) ), The fromXlsx() extractor emits one record per row in the source file.","title":"Basic usage"},{"location":"triply-etl/extract/xlsx/#multiple-sheets","text":"It is common for XLSX files to have multiple sheets. By default the fromXlsx() extractor enumerates all rows from all sheets as records. If only some sheets should be used, this can be specified as a configuration option. The following code snippet only emits records/rows from the 'people' and 'projects' sheets in the XLSX source file 'example.xlsx' . 
Rows from other sheets in the same XLSX file are not emitted: fromXlsx(Source.file('example.xlsx'), { sheetNames: ['people', 'projects'] }),","title":"Multiple sheets"},{"location":"triply-etl/extract/xlsx/#record-representation","text":"TriplyETL treats every row in XLSX sheet as one record . The columns are emitted as keys and the cells are emitted as values. Unlike other tabular formats like CSV and TSV , values in XLSX can have different types. For example, the following table: ID Name Age 1 Doe, John 32 2 D., Jane can be emitted as the following two TriplyETL records: { \"$recordId\": 1, \"$environment\": \"Development\", \"$sheetName\": \"Sheet1\", \"$fileName\": \"static/Untitled 1.xlsx\", \"ID\": \"1\", \"Name\": \"Doe, John\", \"Age\": 32 } { \"$recordId\": 2, \"$environment\": \"Development\", \"$sheetName\": \"Sheet1\", \"$fileName\": \"static/Untitled 1.xlsx\", \"ID\": \"2\", \"Name\": \"D., Jane\", } Notice the following: - The value for the \"Age\" key is a number. - The special keys $recordId , $environment , and $fileName are documented in the section on Special Keys . - The special key $sheetName is unique to the fromXslx() extractor and is documented in the next subsection.","title":"Record representation"},{"location":"triply-etl/extract/xlsx/#special-key-sheetname","text":"For every record emitted by the fromXlsx() extractor. the $sheetName special key contains the name of the Excel sheet from which that record originates. The presence of the sheet name allows the TriplyETL configuration to be adjusted for different sheet. For example, an Excel spreadsheet may contain a 'companies' sheet and a 'persons' sheet. The name of the sheet may be used to determine which class should be asserted. The following snippet uses transformation translateAll() to map sheet names to class IRIs: fromXlsx(Source.file('example.xlsx')), translateAll({ content: '$sheetName', table: { 'companies': sdo.Organization, 'persons': sdo.Person, }, key: '_class', }), triple(iri(prefix.id, '$recordId'), a, '_class'),","title":"Special key '$sheetName'"},{"location":"triply-etl/extract/xml/","text":"On this page: XML extractor Basic usage Path selectors Nested keys Dealing with dots in keys Index-based list access XML extractor \u00b6 XML or Extensible Markup Language (file name extension .xml ) is a popular open format for tree-shaped source data. TriplyETL has a dedicated fromXml() extractor for this data format. Basic usage \u00b6 The XML extractor is imported in the following way: import { fromXml, Source } from '@triplyetl/etl/generic' The following code snippet extracts records from an XML file that is stored as a TriplyDB Asset : fromXml( Source.TriplyDb.asset('my-dataset', {name: 'my-data.xml'}), {selectors: 'first-element'} ), Notice that the fromXml() extractor requires a selectors option. This specifies the subtrees in the XML that should be treated as individual records. In the above snippet the records are the subtrees that occur between the opening tag and the closing tag. Path selectors \u00b6 If a deeper path must be specified, sequential tags in the path must be separated by a dot: fromXml( Source.TriplyDb.asset('my-dataset', {name: 'my-data.xml'}), { selectors: 'first-element.second-element.third-element' } ), It is common for large XML sources to contain different kinds of records. Different kinds of records often occur under different paths. It is therefore possible to specify multiple paths, all of which will be used for extract records from the XML source. 
The following code snippet extracts records for three different paths in the same XML source: fromXml( Source.TriplyDb.asset('my-dataset', {name: 'my-data.xml'}), { selectors: [ 'first-element.second-element.third-element', 'first-element.second-element.alt-element', 'first-element.second-element.other-element', ] } ), TriplyETL supports the W3C XML standard. Nested keys \u00b6 Since XML can store tree-shaped data, it can have nested keys (paths) and indexed arrays. <name>Data about countries.</name> The Netherlands Germany Paths are specified as dot-separated sequences of keys, starting at the top-level and ending at the required value. For the XML example above, TriplyETL can access the textual content inside the \"name\" key, which itself is nested inside the \"title\" , \"metadata\" , and \"root\" keys. This path is expressed in [1]. Notice that the path expressed in [1] is different from the path expressed in [2], which accesses the textual content of the \"name\" key, but nested inside the \"country\" , \"data\" , and \"root\" keys. (The use of the [0] index is explained in the next section.) [1] root.metadata.title.name.$text [2] root.data.country[0].name.$text Path expressions can be used as string keys in many places in TriplyETL. For example, we can assert the title of a dataset in the following way: etl.use( triple( prefix.dataset('my-dataset'), dct.title, literal('root.metadata.title.name.$text', 'en') ), ) This results in the following assertion: dataset:my-dataset dct:title 'Data about countries.'@en. Dealing with dots in keys \u00b6 In the previous section we saw that dots are used to separate keys in paths. However, sometimes a dot can occur as a regular character inside a key. In such cases, we need to apply additional escaping of the key name to avoid naming conflicts. The example data from the previous section contains XML attribute [1], which is represented by key [2] in the TriplyETL record. [1] country.id [2] [\"@country.id\"] Notice that the dot in [2] is part of the key name. The escape notation [\"...\"] ensures that the dot is not misinterpreted as denoting a sequence of keys. Overall, \u2018a.b\u2019 notation allow going into nested object and accessing values within the nest while \u2018[\u201ca.b\u201d]\u2019 takes value a.b key as a name, therefore does not go into the nest. The following extensive example shows how complex sequences of keys with dots in them can be used: { \"a\": { \"$text\": \"1\" }, \"b\": { \"c\": { \"$text\": \"2\" } }, \"b.c\": { \"$text\": \"3\" }, \"d.d\": { \"e\": { \"$text\": \"4\" }, \"f\": { \"$text\": \"5\" } }, \"g.g\": [ { \"h.h\": { \"$text\": \"6\" } }, { \"h.h\": { \"$text\": \"7\" } } ] } Key Value 'a.$text' 1 'b.c.$text' 2 '[\"b.c\"].$text' 3 '[\"d.d\"].e.$text' 4 '[\"d.d\"].f'.$text' 5 '[\"g.g\"][0][\"h.h\"].$text' 6 '[\"g.g\"][1][\"h.h\"].$text' 7 Index-based list access \u00b6 Tree-shaped data formats often allow multiple values to be specified in an ordered list. Examples of this are arrays in JSON and XML elements with the same tag that are directly nested under the same parent element. TriplyETL is able to access specific elements from lists based on their index or position. Following the standard practice in Computer Science, TriplyETL refers to the first element in the list as having index 0. The second element has index 1, etc. 
For the above example record, we can assert the name of the first country as follows: triple( iri(prefix.country, 'root.data.country[0].[\"@country.id\"]'), rdfs.label, literal('root.data.countries[0].name.$text', 'en') ), This results in the following assertion: country:nl rdfs:label 'The Netherlands'@en. We can also assert the name of the second country. Notice that only the index is different (\u20181\u2019 instead of \u20180\u2019): triple( iri(prefix.country, 'root.data.countries[1].[\"@country.id\"]'), ... This results in the following assertion: country:de rdfs:label 'Germany'@en.","title":"XML"},{"location":"triply-etl/extract/xml/#xml-extractor","text":"XML or Extensible Markup Language (file name extension .xml ) is a popular open format for tree-shaped source data. TriplyETL has a dedicated fromXml() extractor for this data format.","title":"XML extractor"},{"location":"triply-etl/extract/xml/#basic-usage","text":"The XML extractor is imported in the following way: import { fromXml, Source } from '@triplyetl/etl/generic' The following code snippet extracts records from an XML file that is stored as a TriplyDB Asset : fromXml( Source.TriplyDb.asset('my-dataset', {name: 'my-data.xml'}), {selectors: 'first-element'} ), Notice that the fromXml() extractor requires a selectors option. This specifies the subtrees in the XML that should be treated as individual records. In the above snippet the records are the subtrees that occur between the opening tag and the closing tag.","title":"Basic usage"},{"location":"triply-etl/extract/xml/#path-selectors","text":"If a deeper path must be specified, sequential tags in the path must be separated by a dot: fromXml( Source.TriplyDb.asset('my-dataset', {name: 'my-data.xml'}), { selectors: 'first-element.second-element.third-element' } ), It is common for large XML sources to contain different kinds of records. Different kinds of records often occur under different paths. It is therefore possible to specify multiple paths, all of which will be used for extract records from the XML source. The following code snippet extracts records for three different paths in the same XML source: fromXml( Source.TriplyDb.asset('my-dataset', {name: 'my-data.xml'}), { selectors: [ 'first-element.second-element.third-element', 'first-element.second-element.alt-element', 'first-element.second-element.other-element', ] } ), TriplyETL supports the W3C XML standard.","title":"Path selectors"},{"location":"triply-etl/extract/xml/#nested-keys","text":"Since XML can store tree-shaped data, it can have nested keys (paths) and indexed arrays. <name>Data about countries.</name> The Netherlands Germany Paths are specified as dot-separated sequences of keys, starting at the top-level and ending at the required value. For the XML example above, TriplyETL can access the textual content inside the \"name\" key, which itself is nested inside the \"title\" , \"metadata\" , and \"root\" keys. This path is expressed in [1]. Notice that the path expressed in [1] is different from the path expressed in [2], which accesses the textual content of the \"name\" key, but nested inside the \"country\" , \"data\" , and \"root\" keys. (The use of the [0] index is explained in the next section.) [1] root.metadata.title.name.$text [2] root.data.country[0].name.$text Path expressions can be used as string keys in many places in TriplyETL. 
For example, we can assert the title of a dataset in the following way: etl.use( triple( prefix.dataset('my-dataset'), dct.title, literal('root.metadata.title.name.$text', 'en') ), ) This results in the following assertion: dataset:my-dataset dct:title 'Data about countries.'@en.","title":"Nested keys"},{"location":"triply-etl/extract/xml/#dealing-with-dots-in-keys","text":"In the previous section we saw that dots are used to separate keys in paths. However, sometimes a dot can occur as a regular character inside a key. In such cases, we need to apply additional escaping of the key name to avoid naming conflicts. The example data from the previous section contains XML attribute [1], which is represented by key [2] in the TriplyETL record. [1] country.id [2] [\"@country.id\"] Notice that the dot in [2] is part of the key name. The escape notation [\"...\"] ensures that the dot is not misinterpreted as denoting a sequence of keys. Overall, \u2018a.b\u2019 notation allow going into nested object and accessing values within the nest while \u2018[\u201ca.b\u201d]\u2019 takes value a.b key as a name, therefore does not go into the nest. The following extensive example shows how complex sequences of keys with dots in them can be used: { \"a\": { \"$text\": \"1\" }, \"b\": { \"c\": { \"$text\": \"2\" } }, \"b.c\": { \"$text\": \"3\" }, \"d.d\": { \"e\": { \"$text\": \"4\" }, \"f\": { \"$text\": \"5\" } }, \"g.g\": [ { \"h.h\": { \"$text\": \"6\" } }, { \"h.h\": { \"$text\": \"7\" } } ] } Key Value 'a.$text' 1 'b.c.$text' 2 '[\"b.c\"].$text' 3 '[\"d.d\"].e.$text' 4 '[\"d.d\"].f'.$text' 5 '[\"g.g\"][0][\"h.h\"].$text' 6 '[\"g.g\"][1][\"h.h\"].$text' 7","title":"Dealing with dots in keys"},{"location":"triply-etl/extract/xml/#index-based-list-access","text":"Tree-shaped data formats often allow multiple values to be specified in an ordered list. Examples of this are arrays in JSON and XML elements with the same tag that are directly nested under the same parent element. TriplyETL is able to access specific elements from lists based on their index or position. Following the standard practice in Computer Science, TriplyETL refers to the first element in the list as having index 0. The second element has index 1, etc. For the above example record, we can assert the name of the first country as follows: triple( iri(prefix.country, 'root.data.country[0].[\"@country.id\"]'), rdfs.label, literal('root.data.countries[0].name.$text', 'en') ), This results in the following assertion: country:nl rdfs:label 'The Netherlands'@en. We can also assert the name of the second country. Notice that only the index is different (\u20181\u2019 instead of \u20180\u2019): triple( iri(prefix.country, 'root.data.countries[1].[\"@country.id\"]'), ... 
This results in the following assertion: country:de rdfs:label 'Germany'@en.","title":"Index-based list access"},{"location":"triply-etl/generic/changelog/","text":"On this page: Changelog TriplyETL 4.8.12 TriplyETL 4.4.7 TriplyETL 4.4.6 [Fixed] Minor clean-ups TriplyETL 4.4.5 [Fixed] TriplyDB-JS instantiation TriplyETL 4.4.0 [Added] linked data event stream middleware (LDES) TriplyETL 4.2.0 [Added] retry mechanism for OAI TriplyETL 4.1.16 [Fixed] SHACL version update and fixing now() in SHACL rules engine TriplyETL 4.1.15 [Fixed] large default timeout for OAI requests [Added] support for parsing multiple RDF/XML docs from OAI TriplyETL 4.1.14 [Fixed] SHACL dependency version TriplyETL 4.1.13 [Fixed] hanging ETL and preparations for a simplified eslint / prettier setup TriplyETL 4.1.12 [Fixed] SHACL executeRules() and added Speedy as a peer dependency TriplyETL 4.1.11 [Added] multiple hashing algorithms to hashedIri() TriplyETL 4.1.10 [Fixed] performance issue with validate() TriplyETL 4.1.9 [Fixed] issue with fromOai() in combination with metadataYieldsRdf TriplyETL 4.1.8 [Added] preventServiceDowntime option to avoid Service downtime in toTriplyDb() function TriplyETL 4.1.7 [Enhanced] Using NamedNodes and/or Literal as content for addHashedIri() Bug fixes TriplyETL 4.1.6 [Fixed] SHACL validate() with SPARQL target returned incorrect results TriplyETL 4.1.2 through 4.1.5 [Enhanced] Improved the timeouts handling for fromOai() extractor TriplyETL 4.1.1 [Changed] executeRules() supports only two arguments [Enhanced] Increased stack size [Added] CLI flag --keep-tmp-dir to save temporary data directory TriplyETL 4.0.0 [Changed] IRI-related middlewares no longer use skolem URLs [Changed] fromShapeFile() is now called fromShapefile() [Removed] Function addRandomIri() [Added] New variables added to ETL TriplyETL 3.1.0 && 3.1.1 [Deprecated] Deprecated fromShapeFile() for fromShapefile() [Deprecated] Deprecated addRandomIri() function. [Enhanced] Improved SHACL report. 
[Enhanced] Improved objects() function [Enhanced] RML middleware [Enhanced] Static vocabularies [Enhanced] NPM packages [Fixed] Base IRI when using loadRdf() [Fixed] String encoding for IRIs [Fixed] New datatype added to addPoint() middleware TriplyETL 3.0.20 [Enhanced] Improved copySource() function [Enhanced] Prefix uploading TriplyETL 3.0.15 through 3.0.18 [Enhanced] RDF compression before upload [Enhanced] Skolem IRI prefix use Bug fixes TriplyETL 3.0.14 [Added] Support for RML [Build] Environments beyond the DTAP defaults Bug fixes TriplyETL 3.0.7 through 3.0.9 [Added] CLI flag to skip version check [Added] Support for JPath expressions [Added] Authentication for the OAI-PMH extractor [Added] XSLT support for the OAI-PMH extractor Bug fixes TriplyETL 3.0.6 Bug fixes TriplyETL 3.0.5 Bug fixes TriplyETL 3.0.4 [Added] Dataset metadata specification [CLI] Reverse logic for creating error traces Bug fixes TriplyETL 3.0.3 [Changed] Support for the NDE Dataset Register TriplyETL 3.0.2 [Added] Static statement assertion Bug fixes TriplyETL 3.0.1 [Enhanced] Source string validation [Enhanced] Synchronize specific services [Fixed] Bug fixes TriplyETL 3.0.0 [Added] Support for XSLT [Added] Support for the SPARQL Select and SPARQL Ask queries [Enhanced] Simplified usage of 'nestedPairs()' [Changed] Automatic prefix handling in TriplyDB using 'toRdf()' [Changed] New approach to prefix handling in TriplyETL [Changed] New package '@triplyetl/vocabularies' [Changed] RDF serialization parsing with 'loadRdf()' [Changed] Extended log and terminal output for ETL debugging [Changed] 'toRdf()' for account-based token access [Changed] Relocation middleware: 'resetStore()' and 'randomKey()' [Changed] Record selection with '--offset' and '--limit' [Changed] Removal of 'mapQuads()' [Changed] Warning for old Node.JS versions [Changed] SHACL Validation Engine [Changed] Trace for large records [Changed] Transition to in-memory engine Speedy [Enhanced] Improvements to ETL logs [Enhanced] Prevent using multiple extractors [Enhanced] Better error reporting for CSV, TSV, and XML sources. [Enhanced] Default CRS for 'wkt.addPoint()' [Enhanced] Handle conflicting TriplyDB instance specifications [Enhanced] More information for failing HTTP calls Bug fixes TriplyETL 2.0.7 through 2.0.19 Bug fixes TriplyETL 2.0.6 [Added] Support for the PREMIS vocabulary [Added] New debug function logMemory() [Added] Support for the 'ListIdentifiers' verb in the OAI-PMH extractor TriplyETL 2.0.5 [Changed] New default engine for SPARQL Construct [Added] New CLI tool for comparing graphs Bug fixes TriplyETL 2.0.4 [Enhanced] Better output for graph comparison TriplyETL 2.0.3 Bug fixes TriplyETL 2.0.2 Bug fixes TriplyETL 2.0.1 [Added] Timeout flag for TriplyETL Runner TriplyETL 2.0.0 [Changed] Modules infrastructure moves from CommonJS to ESM [Changed] Debug functions move to a new module [Enhanced] Better error messages when things go wrong Bug fixes TriplyETL 1.0.x Changelog \u00b6 You can use this changelog to perform a safe update from an older version of TriplyETL to a newer one. See the documentation for Upgrading TriplyETL repositories for the advised approach, and how the changelog factors into that. TriplyETL 4.8.12 \u00b6 Release date: 2024-06-24 Cleaning up types and upgradeing TriplyDB-js to 8.2.1 TriplyETL 4.4.7 \u00b6 Release date: 2024-07-04 Added logInterval property to fromOai. This is useful in situations where TriplyETL cannot render a progress bar (because the OAI endpoint does not return a total-results value). 
In such cases you can use this property to render a status update every x number of records. Improved logging information for errors TriplyETL 4.4.6 \u00b6 Release date: 2024-06-24 [Fixed] Minor clean-ups \u00b6 Replacing rdf-js for @rdfjs/types. Adding a resolution for what-wg and rimraf so that there are no deprecation warnings for those modules. Removing some old yarn specific commands, such as pinst. TriplyETL 4.4.5 \u00b6 Release date: 2024-06-12 [Fixed] TriplyDB-JS instantiation \u00b6 Ensure TriplyDB-JS is instantiated properly when proxy settings are passed to TriplyETL TriplyETL 4.4.0 \u00b6 Release date: 2024-06-04 [Added] linked data event stream middleware (LDES) \u00b6 TriplyETL 4.2.0 \u00b6 Release date: 2024-05-10 [Added] retry mechanism for OAI \u00b6 Added retry-mechanism to from-oai. By default, all OAI requests now retry 3 times TriplyETL 4.1.16 \u00b6 Release date: 2024-05-09 [Fixed] SHACL version update and fixing now() in SHACL rules engine \u00b6 TriplyETL 4.1.15 \u00b6 Release date: 2024-05-08 [Fixed] large default timeout for OAI requests \u00b6 Add (very) large default timeout for oai requests, to avoid possibly hanging when server does not respond [Added] support for parsing multiple RDF/XML docs from OAI \u00b6 TriplyETL 4.1.14 \u00b6 Release date: 2024-04-18 [Fixed] SHACL dependency version \u00b6 Update SHACL dependency, with some performance improvements TriplyETL 4.1.13 \u00b6 Release date: 2024-04-18 [Fixed] hanging ETL and preparations for a simplified eslint / prettier setup \u00b6 TriplyETL 4.1.12 \u00b6 Release date: 2024-04-16 [Fixed] SHACL executeRules() and added Speedy as a peer dependency \u00b6 Includes fix regarding SPARQL Functions used in executeRules() , the used Speedy SPARQL engine is now a peer dependency, which will use the same version as the one used as in the @triplyetl/etl package TriplyETL 4.1.11 \u00b6 Release date: 2024-03-27 [Added] multiple hashing algorithms to hashedIri() \u00b6 hashedIri() now supports SHA1, SHA256, SHA384 & SHA512 hashtypes, next to the existing (and still default) MD5 hashtype. See issue #390 . Improvements in the SHACL performance and usage. TriplyETL 4.1.10 \u00b6 Release date: 2024-03-12 [Fixed] performance issue with validate() \u00b6 Because of a previous fix, the validate middleware become slower. This fix makes it performant again. TriplyETL 4.1.9 \u00b6 Release date: 2024-03-12 [Fixed] issue with fromOai() in combination with metadataYieldsRdf \u00b6 Fixed issue where fromOai() middleware reported an error when using metadataYieldsRdf and OAI response contained exactly 1 record. TriplyETL 4.1.8 \u00b6 Release date: 2024-03-10 [Added] preventServiceDowntime option to avoid Service downtime in toTriplyDb() function \u00b6 You can now update services on TriplyDB without experiencing any downtime. Once data uploading is complete, each service will be recreated using a temporary name and the same configuration as the outdated service. Once the temporary service is up and running, the outdated one will be removed. toTriplyDb({dataset: 'my-dataset', opts: { synchronizeServices: ['my-elastic-service', 'my-jena-service'], preventServiceDowntime: true } }) The execution of the above snippet will result in the following console output: Warning Service my-elastic-service of type elasticSearch with status running is out of sync. Info Creating temporary elasticSearch service triplyetl-temp-1710169198327 for my-elastic-service. Warning Service my-jena-service of type jena with status running is out of sync. 
Info Creating temporary jena service triplyetl-temp-1710169198339 for my-jena-service. Info Swapping service my-jena-service with triplyetl-temp-1710169198339 Info Service my-jena-service updated in 1 minute, Info Swapping service my-elastic-service with triplyetl-temp-1710169198327 Info Service my-elastic-service updated in 2 minutes, 7 seconds TriplyETL 4.1.7 \u00b6 Release date: 2024-03-09 [Enhanced] Using NamedNodes and/or Literal as content for addHashedIri() \u00b6 The addHashedIri() function now considers whether a NamedNode and/or Literal object is utilized to generate a hash. In such cases, the internal JSON representation is no longer employed. Instead, we utilize the value property for a NamedNode or the combination of the value, language, and datatype value properties for a Literal. This enhancement aims to produce more consistent hashed IRIs over time. Bug fixes \u00b6 Using skipRest() in the ifElse() and switch() middlewares caused unexpected ETL execution. TriplyETL 4.1.6 \u00b6 Release date: 2024-03-07 [Fixed] SHACL validate() with SPARQL target returned incorrect results \u00b6 Each shape undergoes conversion to a SHACL Validator object only once during ETL to avoid reloading shapes from disk or string for every record. This approach isn't feasible when SPARQL target nodes alter the model, as it would result in adding those targets for each record. TriplyETL 4.1.2 through 4.1.5 \u00b6 Release date: 2024-03-01 [Enhanced] Improved the timeouts handling for fromOai() extractor \u00b6 This enhancement resolves timeout errors that occurred with requests taking an extended period to respond. By utilizing a custom Fetch Agent from the undici package, we've eliminated internal timeouts. TriplyETL 4.1.1 \u00b6 Release date: 2024-02-18 [Changed] executeRules() supports only two arguments \u00b6 To set a maximum number of iterations for the execution of the SHACL rules, maxIterations or errorOnMaxIterations needs to be specified in the executeRules() function. [Enhanced] Increased stack size \u00b6 The maximum call stack size has been increased so that large datasets can be loaded without throwing errors. [Added] CLI flag --keep-tmp-dir to save temporary data directory \u00b6 Introduced the CLI flag --keep-tmp-dir in order to store all temporary files regardless of the completion status. This allows the user to debug ETLs by studying the intermediate files the ETL has created. TriplyETL 4.0.0 \u00b6 Release date: 2024-01-29 [Changed] IRI-related middlewares no longer use skolem URLs \u00b6 The following middlewares: addHashedIri() , addIri() , addRandomIri() , no longer allow users to create URLs that have pathnames starting with \"/.well-known/genid/\", since these are considered skolemised URLs. [Changed] fromShapeFile() is now called fromShapefile() \u00b6 The format is called ESRI Shapefile, hence our extractor function's name had to be changed from fromShapeFile() to fromShapefile() . [Removed] Function addRandomIri() \u00b6 Since function addRandomIri() does not add anything beyond addSkolemIri() , the function has been removed from the TriplyETL library. Random IRIs should be skolem IRIs that can be readily replaced by blank nodes. 
[Added] New variables added to ETL \u00b6 New flags have been introduced when constructing an ETL: /** * Timeout ETL after set duration in milliseconds */ timeout: number; /** * If set to TRUE, the ETL will do a hard exit, preventing uploads to TDB on timeouts */ exitOnTimeout: boolean; which can be set as follows: const etl = new Etl({timeout: 1000, exitOnTimeout: true}) This will cause a hard exit when a timeout occurs and nothing will be executed after this timeout. TriplyETL 3.1.0 && 3.1.1 \u00b6 Release date: 2024-01-15 && 2024-01-17 [Deprecated] Deprecated fromShapeFile() for fromShapefile() \u00b6 [Deprecated] Deprecated addRandomIri() function. \u00b6 Function addRandomIri() does not add anything beyond addSkolemIri() . Random IRIs should be skolem IRIs that can be readily replaced by blank nodes. [Enhanced] Improved SHACL report. \u00b6 When a SHACL shape that is used to validate data does not itself conform to the SHACL-SHACL shape, the report of that non-conforming shape is now printed. [Enhanced] Improved objects() function \u00b6 The objects() middleware now requires a minimum of 2 objects, deviating from its previous behavior, which was limited to functionality similar to the triple() function. [Enhanced] RML middleware \u00b6 The RML map() middleware now allows a string Source and a string primitive as input. [Enhanced] Static vocabularies \u00b6 With the latest update, TriplyETL vocabularies are now represented as Vocabulary objects, replacing the previous usage of objects with the type IRI . This change may necessitate adjustments to existing ETLs that utilize static vocabularies, such as aat . In this case, the vocabulary would need to be updated to aat.toIri() to ensure compatibility with the correct type. [Enhanced] NPM packages \u00b6 All NPM packages are up to date with their latest versions. [Fixed] Base IRI when using loadRdf() \u00b6 There was some inconsistency in the expected base IRI. For example, the following snippet: import { logQuads } from '@triplyetl/etl/debug' import { Etl, loadRdf, Source } from '@triplyetl/etl/generic' export default async function (): Promise { const etl = new Etl() etl.use( loadRdf(Source.string('

.')), logQuads(), ) return etl } would result in: { } rather than: { } This issue has been fixed. [Fixed] String encoding for IRIs \u00b6 It is now possible to check whether a value of a key used to create an IRI contains valid characters. A previous warning incorrectly flagged a space (' ') as an invalid character in the IRI, but that has been taken care of. Now, when you run the script, you won't encounter the misleading warning, providing a more accurate and hassle-free execution. In this case, [1] results in [2] instead of the invalid [3]: [1] a b [2] http://ex.com/a%20b [3] http://ex.com/ a b As well as [4] being encoded as [5]: [4] a&b [5] a&b Or [6] can be legitimately encoded in CSV using [7]: [6] a,b [7] \"a,b\" [Fixed] New datatype added to addPoint() middleware \u00b6 Datatype wktLiteral has been added to the addPoint() middleware. TriplyETL 3.0.20 \u00b6 Release date: 2024-01-04 [Enhanced] Improved copySource() function \u00b6 Function etl.copySource() accepts the same destination format as toTriplyDB(), so that the same destination does not need to be specified twice. [Enhanced] Prefix uploading \u00b6 Prefixes are no longer uploaded by default, only explicit prefixes that are defined when constructing an ETL with new Etl({ prefixes }) . TriplyETL 3.0.15 through 3.0.18 \u00b6 Release date: 2023-12-07 through 2023-12-28 [Enhanced] RDF compression before upload \u00b6 It is now possible to enable compression of RDF data before it is uploaded to TriplyDB. See the toRdf() function for more information. [Enhanced] Skolem IRI prefix use \u00b6 TriplyETL now emits an error when a Skolem IRI prefix is used with addHashedIri() . Bug fixes \u00b6 This release provides bug fixes to XSLT support (see XSLT Transformations and XSLT Assertions ). TriplyETL 3.0.14 \u00b6 Release date: 2023-12-04 [Added] Support for RML \u00b6 This release introduces support for the RML transformation and assertion language. RML is an ETL configuration language that has gained traction in the linked data community over the last couple of years. See the following pages for more information: RML Transformations RML Assertions [Build] Environments beyond the DTAP defaults \u00b6 It is now possible to extend the standard environments offered by TriplyETL. Bug fixes \u00b6 This release fixes a URL/request-related error in the fromOai extractor. TriplyETL 3.0.7 through 3.0.9 \u00b6 Release date: 2023-11-29 [Added] CLI flag to skip version check \u00b6 Introduced the CLI flag --skip-version-check because some users cannot use remote connections due to security policies. [Added] Support for JPath expressions \u00b6 The toJson() middleware now uses path selectors just like fromXml() , but also supports JPath expressions. [Added] Authentication for the OAI-PMH extractor \u00b6 fromOai() now accepts a Request object as the value for the url option, allowing more fine-grained use of the HTTP request (including authentication information). [Added] XSLT support for the OAI-PMH extractor \u00b6 fromOai() now accepts an optional parameter stylesheet . The argument must be an object with 1 required key source pointing to the source of the stylesheet and 1 optional argument yieldsRdf . When provided, the XSL stylesheet is applied to the OAI response. The result of the transformation must still be a valid OAI response. A combined sketch of these two OAI-PMH additions is shown below, after this release's bug fixes. Bug fixes \u00b6 Fixed an issue where keys that are used for internal administration were shown in logged records after using the ifElse() and switch() control structures.
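To illustrate the two fromOai() additions above, the sketch below combines authentication (a Request object as the value of the url option) with the optional stylesheet parameter and its source and yieldsRdf keys. The endpoint, header value, and stylesheet path are placeholders, and any detail not named in this changelog entry is an assumption rather than documented API.
fromOai({
  // A Fetch API Request can be passed as the url, e.g. to add authentication headers.
  url: new Request('https://example.org/oai', {
    headers: { Authorization: 'Bearer <token>' }, // placeholder credentials
  }),
  // Optional XSLT stylesheet that is applied to the OAI response;
  // the result of the transformation must still be a valid OAI response.
  stylesheet: {
    source: Source.file('transform.xsl'), // required key: the stylesheet source
    // yieldsRdf: true,                   // optional key, per this changelog entry
  },
}),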
TriplyETL 3.0.6 \u00b6 Release date: 2023-11-14 Bug fixes \u00b6 This release fixes issues with upstream packages that contained breaking changes. TriplyETL 3.0.5 \u00b6 Release date: 2023-11-09 Bug fixes \u00b6 The progress bar would sometimes terminate at 99% or 101%, instead of the expected 100%. TriplyETL 3.0.4 \u00b6 Release date: 2023-11-07 [Added] Dataset metadata specification \u00b6 It is now possible to use metadata when creating datasets. If a new dataset is created, then the metadata is always used; for existing datasets you can choose to ignore, merge, or replace existing metadata. This feature prevents ETLs from accidentally overwriting metadata that might have been changed in the UI/Dashboard of their TriplyDB instance. The following options are available for this new feature in the opts.existingMetadata parameter: ignore : no metadata will be changed even if no metadata is present for this dataset (this is the default value) merge : only properties that have no value will be overwritten by the provided metadata replace : all existing metadata will be replaced, even if the provided metadata contains empty keys [CLI] Reverse logic for creating error traces \u00b6 Before this release, running an ETL would always create an error trace file. It was possible to disable this behavior with CLI flag --skip-error-trace . Starting in this release, the error trace file is no longer created by default, and a newly added CLI flag --create-error-trace must now be specified in order to create the error trace file. Bug fixes \u00b6 The following bugs were fixed: The number of synchronized services was not always reported correctly in CLI output. A package we depend on introduced a breaking change, causing a function to no longer be available. TriplyETL 3.0.3 \u00b6 Release date: 2023-11-01 [Changed] Support for the NDE Dataset Register \u00b6 The code to submit datasets to the NDE Dataset Register has been moved to TriplyDB-JS. The way to publish a dataset now is to add an option to the toTriplyDb() function: { submitToNDEDatasetRegister: true } . Example: toTriplyDb({dataset: 'nde', opts: {submitToNDEDatasetRegister: true}}) TriplyETL 3.0.2 \u00b6 Release date: 2023-10-23 [Added] Static statement assertion \u00b6 export default async function(): Promise { const etl = new Etl({baseIri: Iri('https://example.com/')}) await etl.staticAssertions( pairs( iri(etl.standardGraphs.default), [a, dcat.Dataset], [skos.prefLabel, literal(str(\"Family Doe\"), lang.en)], [dct.created, literal(str(new Date().toISOString()), xsd.dateTime)], ), ); await etl.staticAssertions( pairs(iri(etl.standardGraphs.default), [skos.prefLabel, literal(str(\"Familie Doe\"), lang.nl)]), ); etl.use( fromJson([{ name: \"John Doe\" }, { name: \"Jane Doe\" }]), triple(iri(etl.standardPrefixes.id, \"$recordId\"), sdo.name, \"name\"), logQuads(), ); return etl } You can now assert so-called \"static triples\": triples that are not related to the source extractors but should only be asserted once per ETL. Bug fixes \u00b6 There was an error in the ifElse() control structure that caused ETLs to not use the fallback 'else' block in some situations. TriplyETL 3.0.1 \u00b6 Release date: 2023-10-19 [Enhanced] Source string validation \u00b6 The addLiteral() function can now validate string data that occurs in the Record. Such validation can be used in addition to validation in the Internal Store (graph comparison and SHACL validation). Look at the documentation of addLiteral() for more information.
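As a rough sketch of what such source string validation could look like in an ETL configuration: the { content, datatype, key } option shape mirrors the addIri() example elsewhere in this changelog, but the validate option below is a hypothetical placeholder; consult the addLiteral() documentation for the actual parameter name and form.
addLiteral({
  content: 'dateOfBirth',            // record key holding the source string
  datatype: xsd.date,                // datatype of the literal that is created
  validate: /^\d{4}-\d{2}-\d{2}$/,   // hypothetical: reject source strings that are not ISO dates
  key: '_dob',                       // record key under which the literal is stored
}),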
[Enhanced] Synchronize specific services \u00b6 When publishing linked data to TriplyDB, it is now possible to synchronize one specific service. This is specifically useful in case an Acceptance and a Production service are used, and only the former should be synchronized. See the documentation for publishing to remote data destinations for more information. [Fixed] Bug fixes \u00b6 The following bugs have been fixed: The progress bar would sometimes go over 100%. The error report file ( etl.err ) would sometimes contain sentinel keys like $sentinel-${MD5-HASH} . These sentinel keys are used for internal bookkeeping in TriplyETL, and are no longer part of the Record. Some XSLT transformations failed on Windows because of incorrect redirection of error messages. TriplyETL 3.0.0 \u00b6 Release date: 2023-10-12 [Added] Support for XSLT \u00b6 Support was added for the Extensible Stylesheet Language Transformations (XSLT) configuration language. This can be used in the following two functions: The fromXml() extractor function, for XML sources that are transformed to regular XML. The loadRdf() function, for XML sources that are transformed into RDF/XML. In both cases, this functionality is used by configuring the stylesheet parameter with an XSLT Stylesheet (e.g. using Source.file() ). Example code that uses fromXml() : fromXml(Source.file(XMLFile), { selectors: \"rdf:RDF.sdo:Person\", stylesheet: Source.file(XSLTStylesheet), }), Example code that uses loadRdf() : loadRdf(Source.file(XMLFile), { contentType: \"application/rdf+xml\", stylesheet: Source.file(XSLTStylesheet), }), [Added] Support for the SPARQL Select and SPARQL Ask queries \u00b6 The extractors fromCsv() , fromJson() , fromTsv() and fromXml() now support SPARQL Select queries. The extractors fromJson() and fromXml() also support SPARQL Ask queries. The example below shows how to use a SPARQL Ask query in the fromJson() extractor: fromJson(Source.TriplyDb.query('account', 'query-name', { triplyDb: { url: 'https://api.triplydb.com' } })) [Enhanced] Simplified usage of 'nestedPairs()' \u00b6 The nestedPairs() middleware can be used without providing the subject node that connects the pairs to the object/predicate. This will automatically create a Skolem IRI for the subject: nestedPairs(S, P, [a, sdo.Person]) For example: fromJson([{ id: '1', height: 15 }]), addSkolemIri({ prefix: prefix.skolem, key: '_height', }), nestedPairs(iri(prefix.product, 'id'), sdo.height, [qudt.unit, unit.CentiM], [rdf.value, 'height'], ), Will result in the following linked data assertions: product:1 sdo:height [ qudt:unit unit:CentiM; rdf:value 15 ]. [Changed] Automatic prefix handling in TriplyDB using 'toRdf()' \u00b6 Manually specified and standard prefixes are automatically added to TriplyDB when toRdf() is used. The middleware uploadPrefixes() is removed. [Changed] New approach to prefix handling in TriplyETL \u00b6 Prefixes are no longer defined as a function that concatenates a value to an Iri. The Iri is a new type of object in TriplyETL that has a concat() method, which allows you to add a value to the first part of an Iri. For example: const baseIri = Iri('https://example.com/') const prefixId = baseIri.concat('id/') const johnDoe = prefixId.concat('john-doe') [Changed] New package '@triplyetl/vocabularies' \u00b6 The vocabularies and languages are no longer part of the @triplyetl/etl package.
A new module has been released: @triplyetl/vocabularies : Individual vocabularies like rdf and schema.org can be imported in the following way: import { a, rdf, sdo } from '@triplyetl/vocabularies' To import all vocabularies, use: import * as vocab from \"@triplyetl/vocabularies\" Some vocabularies are too large to include, but they can still be used like this: import { aat } from '@triplyetl/vocabularies' const moustache = aat.concat('300379271') or import { aat } from '@triplyetl/vocabularies' addIri({prefix: aat, content: str('300379271'), key: 'moustache'}) To use the RATT lang tools : Import languages : import { languages } from '@triplyetl/vocabularies' Import languages and region : import { region, language } from '@triplyetl/vocabularies' const nl_BE = language.nl.addRegion(region.BE) [Changed] RDF serialization parsing with 'loadRdf()' \u00b6 The loadRdf() function is able to parse known RDF serializations ( Turtle , TriG , N-Triples , N-Quads ) provided as a string without specifying a MIME type. const data = Source.string('...') loadRdf(data) [Changed] Extended log and terminal output for ETL debugging \u00b6 The log file and terminal output have changed. They contain more information to help users debug ETLs. The format of time representation is now H:i:s.u where: H : 24-hour format of an hour with leading zeros (00 through 23) i : Minutes with leading zeros (00 to 59) s : Seconds with leading zeros (00 through 59) u : Microseconds (example: 654321) [Changed] 'toRdf()' for account-based token access \u00b6 The toRdf() middleware now accepts \"me\" as the account name, based on the token. Below are some examples of this being used. toTriplyDb({account: \"me\", dataset: \"myDataset\"}) loadRdf(Source.TriplyDb.rdf(\"me\", datasetName)) Destination.TriplyDb.rdf(\"me\", datasetName) [Changed] Relocation middleware: 'resetStore()' and 'randomKey()' \u00b6 The resetStore() middleware has been moved from ratt to the generic namespace . The randomKey() middleware moved from generic to ratt . [Changed] Record selection with '--offset' and '--limit' \u00b6 You can now use --offset and --limit instead of --from-record-id and --head , e.g. LIMIT=1 OFFSET=8 npx etl . The old arguments can still be used for backwards compatibility. [Changed] Removal of 'mapQuads()' \u00b6 The mapQuads() function was removed. [Changed] Warning for old Node.JS versions \u00b6 If the user's Node.js version is older than the recommended version (currently >=18.0.0), a warning is shown. [Changed] SHACL Validation Engine \u00b6 The SHACL validation engine's performance has been improved. [Changed] Trace for large records \u00b6 A new flag now bypasses generating the trace for very large records: --skip-error-trace . Thus, no trace file is created. [Changed] Transition to in-memory engine Speedy \u00b6 Comunica is no longer part of TriplyETL; the in-memory engine is now Triply's Speedy. [Enhanced] Improvements to ETL logs \u00b6 The logging format was improved by including the following information: the TriplyETL version the Node.js version the DTAP mode the start date and time the end date and time [Enhanced] Prevent using multiple extractors \u00b6 TriplyETL only supports one extractor per ETL configuration object. In the past, it was possible to use multiple extractors, which would result in faulty behavior during ETL runs. Starting in this release, TriplyETL will emit an error when multiple extractors are used. [Enhanced] Better error reporting for CSV, TSV, and XML sources.
\u00b6 In previous releases, the extractor functions fromCsv() , fromTsv() , and fromXml() would not emit the file name in case an error occurred. This was specifically problematic when a large number of data source files were used. Starting in this release, the file name is included in the error message. [Enhanced] Default CRS for 'wkt.addPoint()' \u00b6 In previous releases, the Coordinate Reference System (CRS) was a required attribute for transformation function wkt.addPoint() . Starting in this release, the CRS argument has become optional. When not specified, the default CRS http://www.opengis.net/def/crs/OGC/1.3/CRS84 is used. [Enhanced] Handle conflicting TriplyDB instance specifications \u00b6 In previous releases, it was possible to introduce an ambiguity in specifying the TriplyDB instance to publish data to. This was possible by (1) specifying a TriplyDB API Token in the environment (e.g. through an .env file), and (2) by configuring the triplyDb option in the loadRdf() function. Starting in this release, TriplyETL will emit an error if the TriplyDB instance in the API Token differs from the TriplyDB instance configured in the triplyDb option. [Enhanced] More information for failing HTTP calls \u00b6 In previous releases, when a failing HTTP call resulted in an error message, only the body of that HTTP call would be included in the error message. Starting in this release, the HTTP status code of the failing HTTP call is included in the error message as well. Bug fixes \u00b6 This release fixes several out-of-memory bugs in the SHACL validation function . TriplyETL 2.0.7 through 2.0.19 \u00b6 Release dates: 2023-06-17 through 2023-09-29 Bug fixes \u00b6 The following bugs were fixed: Processing an Excel sheet with fromXml() would sometimes consume too much memory. Several installation issues on Windows have been resolved. The async-saxophone library for XML processing was adjusted to support the current LTS version of Node.js (v18). TriplyETL 2.0.6 \u00b6 Release date: 2023-06-07 [Added] Support for the PREMIS vocabulary \u00b6 Support was added for the PREMIS 3.0.0 vocabulary. This vocabulary is published by the Library of Congress and can be used to publish metadata about the preservation of digital objects. See the PREMIS documentation for more information. The vocabulary can be imported from the 'vocab' module: import { premis } from '@triplyetl/vocabularies' See the documentation on external vocabulary declarations for more information. [Added] New debug function logMemory() \u00b6 A new debug function logMemory() has been added. This function prints an overview of the current memory usage of TriplyETL. This allows users to detect fluctuations in memory consumption inside their pipelines. [Added] Support for the 'ListIdentifiers' verb in the OAI-PMH extractor \u00b6 The fromOai() extractor already supported the ListRecords verb. This release adds support for the ListIdentifiers verb as well. This new verb allows users to stream through the headers of all records in an OAI-PMH collection, without requiring the full record (i.e. body) to be retrieved. TriplyETL 2.0.5 \u00b6 Release date: 2023-05-25 [Changed] New default engine for SPARQL Construct \u00b6 The default engine for evaluating SPARQL Construct queries (function construct() ) has changed from Comunica to Speedy. Speedy is a new SPARQL engine that is developed by Triply. Comunica is an open source engine that is developed by the open source community.
Since SPARQL is a standardized query language, this change should not cause a difference in behavior for your ETL pipelines. In the unexpected case where an ETL pipeline is negatively affected by this change, the old situation can be restored by explicitly configuring the Comunica engine: import { construct } from '@triplyetl/etl/sparql' construct(Source.TriplyDb.query('my-query'), { sparqlEngine: 'comunica' }), The benefit of switching to the Speedy engine is that this engine is expected to be faster for most queries. Overall, this change will therefore result in speed improvements for your TriplyETL pipelines. [Added] New CLI tool for comparing graphs \u00b6 The new CLI tool compare allows graph comparison to be performed from the command-line. This uses the same algorithm that is used by the compareGraphs() validation function. Bug fixes \u00b6 This release fixes the following bugs: fromXlsx() did not remove trailing whitespace in cell values. When a SHACL result was printed, an incorrect message about a faulty SHACL model was shown. Some RDF processors did not handle empty RDF inputs correctly. TriplyETL 2.0.4 \u00b6 Release date: 2023-05-11 [Enhanced] Better output for graph comparison \u00b6 Before this release, when two graphs were not isomorph and their difference consisted of a mapping from blank nodes onto blank nodes exclusively, an empty difference message was communicated. From this release onwards, the difference message is non-empty, and specifically indicates the difference between the non-isomorphic graphs in terms of the mismatching blank nodes. Look at this example from the graph comparison documentation, which emits such a difference message. TriplyETL 2.0.3 \u00b6 Release date: 2023-05-10 Bug fixes \u00b6 This release includes the following bug fixes: Error location information is not shown in TriplyETL Runner. Issue when a URL data source ( Source.url() ) includes an HTTP body. TriplyETL 2.0.2 \u00b6 Release date: 2023-05-09 Bug fixes \u00b6 This release fixes bugs related to the recent switch from CommonJS to ESM: Dynamic import bug on Windows. Error reporting issues due to ESM imports. TriplyETL 2.0.1 \u00b6 Release date: 2023-05-03 [Added] Timeout flag for TriplyETL Runner \u00b6 The TriplyETL Runner is the CLI tool that is used to run ETL pipelines. Starting with this version, you can specify a --timeout flag when using the TriplyETL Runner. When the indicated timeout is reached before the pipeline finishes, the TriplyETL Runner will gracefully terminate the ETL by acting as if there are no more incoming records. See the TriplyETL Runner documentation page for more information. TriplyETL 2.0.0 \u00b6 Release date: 2023-05-01 [Changed] Modules infrastructure moves from CommonJS to ESM \u00b6 Before this release, TriplyETL used CommonJS modules to modularize its functionality into different components. Starting in this release, ECMAScript Modules (ESM) are used to modularize TriplyETL functionality into different modules. ESM is a more modern approach for modularizing ECMAScript (JavaScript, TypeScript, and Node.js) code. While CommonJS imports are evaluated at runtime, ESM imports are evaluated at compile time. TriplyETL users benefit from this change, since error messages related to module imports will be detected much earlier in the development process. 
All documentation examples were updated to use ESM syntax for module imports, for example: import { logRecord } from '@triplyetl/etl/debug' [Changed] Debug functions move to a new module \u00b6 Before this release, debug functions like logRecord() and startTrace() were part of the RATT module. Since debug functions can be used in combination with any ETL configuration approach, they were moved to a new module. The debug functions are imported from their new module in the following way: import { logRecord, traceEnd, traceStart } from '@triplyetl/etl/debug' [Enhanced] Better error messages when things go wrong \u00b6 This release introduces a new approach for communicating errors back to the user. When TriplyETL functionality detects an error condition, a unified 'trace middleware' is now used to retrieve information from the environment in which the error occurred. This information is then printed to the error output stream for communication with the user. Bug fixes \u00b6 The following bug fixes are included in this release: Incorrect behavior of the _switch() control function . The fromOai() extractor now communicates more clearly when the accessed OAI-PMH endpoint encounters any issues. When a key with a NULL value was accessed, the name of that key is now included in the error message. This makes it easier for users to find the NULL value in their source data. TriplyETL 1.0.x \u00b6 TriplyETL 1.0.0 was released on 2023-03-20.","title":"Changelog"},{"location":"triply-etl/generic/changelog/#changelog","text":"You can use this changelog to perform a safe update from an older version of TriplyETL to a newer one. See the documentation for Upgrading TriplyETL repositories for the advised approach, and how the changelog factors into that.","title":"Changelog"},{"location":"triply-etl/generic/changelog/#triplyetl-4812","text":"Release date: 2024-06-24 Cleaning up types and upgrading TriplyDB-js to 8.2.1","title":"TriplyETL 4.8.12"},{"location":"triply-etl/generic/changelog/#triplyetl-447","text":"Release date: 2024-07-04 Added logInterval property to fromOai. This is useful in situations where TriplyETL cannot render a progress bar (because the OAI endpoint does not return a total-results value). In such cases you can use this property to render a status update every x records. Improved logging information for errors","title":"TriplyETL 4.4.7"},{"location":"triply-etl/generic/changelog/#triplyetl-446","text":"Release date: 2024-06-24","title":"TriplyETL 4.4.6"},{"location":"triply-etl/generic/changelog/#fixed-minor-clean-ups","text":"Replacing rdf-js with @rdfjs/types. Adding a resolution for what-wg and rimraf so that there are no deprecation warnings for those modules.
Removing some old yarn specific commands, such as pinst.","title":"[Fixed] Minor clean-ups"},{"location":"triply-etl/generic/changelog/#triplyetl-445","text":"Release date: 2024-06-12","title":"TriplyETL 4.4.5"},{"location":"triply-etl/generic/changelog/#fixed-triplydb-js-instantiation","text":"Ensure TriplyDB-JS is instantiated properly when proxy settings are passed to TriplyETL","title":"[Fixed] TriplyDB-JS instantiation"},{"location":"triply-etl/generic/changelog/#triplyetl-440","text":"Release date: 2024-06-04","title":"TriplyETL 4.4.0"},{"location":"triply-etl/generic/changelog/#added-linked-data-event-stream-middleware-ldes","text":"","title":"[Added] linked data event stream middleware (LDES)"},{"location":"triply-etl/generic/changelog/#triplyetl-420","text":"Release date: 2024-05-10","title":"TriplyETL 4.2.0"},{"location":"triply-etl/generic/changelog/#added-retry-mechanism-for-oai","text":"Added retry-mechanism to from-oai. By default, all OAI requests now retry 3 times","title":"[Added] retry mechanism for OAI"},{"location":"triply-etl/generic/changelog/#triplyetl-4116","text":"Release date: 2024-05-09","title":"TriplyETL 4.1.16"},{"location":"triply-etl/generic/changelog/#fixed-shacl-version-update-and-fixing-now-in-shacl-rules-engine","text":"","title":"[Fixed] SHACL version update and fixing now() in SHACL rules engine"},{"location":"triply-etl/generic/changelog/#triplyetl-4115","text":"Release date: 2024-05-08","title":"TriplyETL 4.1.15"},{"location":"triply-etl/generic/changelog/#fixed-large-default-timeout-for-oai-requests","text":"Add (very) large default timeout for oai requests, to avoid possibly hanging when server does not respond","title":"[Fixed] large default timeout for OAI requests"},{"location":"triply-etl/generic/changelog/#added-support-for-parsing-multiple-rdfxml-docs-from-oai","text":"","title":"[Added] support for parsing multiple RDF/XML docs from OAI"},{"location":"triply-etl/generic/changelog/#triplyetl-4114","text":"Release date: 2024-04-18","title":"TriplyETL 4.1.14"},{"location":"triply-etl/generic/changelog/#fixed-shacl-dependency-version","text":"Update SHACL dependency, with some performance improvements","title":"[Fixed] SHACL dependency version"},{"location":"triply-etl/generic/changelog/#triplyetl-4113","text":"Release date: 2024-04-18","title":"TriplyETL 4.1.13"},{"location":"triply-etl/generic/changelog/#fixed-hanging-etl-and-preparations-for-a-simplified-eslint-prettier-setup","text":"","title":"[Fixed] hanging ETL and preparations for a simplified eslint / prettier setup"},{"location":"triply-etl/generic/changelog/#triplyetl-4112","text":"Release date: 2024-04-16","title":"TriplyETL 4.1.12"},{"location":"triply-etl/generic/changelog/#fixed-shacl-executerules-and-added-speedy-as-a-peer-dependency","text":"Includes fix regarding SPARQL Functions used in executeRules() , the used Speedy SPARQL engine is now a peer dependency, which will use the same version as the one used as in the @triplyetl/etl package","title":"[Fixed] SHACL executeRules() and added Speedy as a peer dependency"},{"location":"triply-etl/generic/changelog/#triplyetl-4111","text":"Release date: 2024-03-27","title":"TriplyETL 4.1.11"},{"location":"triply-etl/generic/changelog/#added-multiple-hashing-algorithms-to-hashediri","text":"hashedIri() now supports SHA1, SHA256, SHA384 & SHA512 hashtypes, next to the existing (and still default) MD5 hashtype. See issue #390 . 
Improvements in the SHACL performance and usage.","title":"[Added] multiple hashing algorithms to hashedIri()"},{"location":"triply-etl/generic/changelog/#triplyetl-4110","text":"Release date: 2024-03-12","title":"TriplyETL 4.1.10"},{"location":"triply-etl/generic/changelog/#fixed-performance-issue-with-validate","text":"Because of a previous fix, the validate middleware become slower. This fix makes it performant again.","title":"[Fixed] performance issue with validate()"},{"location":"triply-etl/generic/changelog/#triplyetl-419","text":"Release date: 2024-03-12","title":"TriplyETL 4.1.9"},{"location":"triply-etl/generic/changelog/#fixed-issue-with-fromoai-in-combination-with-metadatayieldsrdf","text":"Fixed issue where fromOai() middleware reported an error when using metadataYieldsRdf and OAI response contained exactly 1 record.","title":"[Fixed] issue with fromOai() in combination with metadataYieldsRdf"},{"location":"triply-etl/generic/changelog/#triplyetl-418","text":"Release date: 2024-03-10","title":"TriplyETL 4.1.8"},{"location":"triply-etl/generic/changelog/#added-preventservicedowntime-option-to-avoid-service-downtime-in-totriplydb-function","text":"You can now update services on TriplyDB without experiencing any downtime. Once data uploading is complete, each service will be recreated using a temporary name and the same configuration as the outdated service. Once the temporary service is up and running, the outdated one will be removed. toTriplyDb({dataset: 'my-dataset', opts: { synchronizeServices: ['my-elastic-service', 'my-jena-service'], preventServiceDowntime: true } }) The execution of the above snippet will result in the following console output: Warning Service my-elastic-service of type elasticSearch with status running is out of sync. Info Creating temporary elasticSearch service triplyetl-temp-1710169198327 for my-elastic-service. Warning Service my-jena-service of type jena with status running is out of sync. Info Creating temporary jena service triplyetl-temp-1710169198339 for my-jena-service. Info Swapping service my-jena-service with triplyetl-temp-1710169198339 Info Service my-jena-service updated in 1 minute, Info Swapping service my-elastic-service with triplyetl-temp-1710169198327 Info Service my-elastic-service updated in 2 minutes, 7 seconds","title":"[Added] preventServiceDowntime option to avoid Service downtime in toTriplyDb() function"},{"location":"triply-etl/generic/changelog/#triplyetl-417","text":"Release date: 2024-03-09","title":"TriplyETL 4.1.7"},{"location":"triply-etl/generic/changelog/#enhanced-using-namednodes-andor-literal-as-content-for-addhashediri","text":"The addHashedIri() function now considers whether a NamedNode and/or Literal object is utilized to generate a hash. In such cases, the internal JSON representation is no longer employed. Instead, we utilize the value property for a NamedNode or the combination of the value, language, and datatype value properties for a Literal. 
This enhancement aims to produce more consistent hashed IRIs over time.","title":"[Enhanced] Using NamedNodes and/or Literal as content for addHashedIri()"},{"location":"triply-etl/generic/changelog/#bug-fixes","text":"Using skipRest() in ifElse() and switch() middlewares have caused unexpected ETL execution.","title":"Bug fixes"},{"location":"triply-etl/generic/changelog/#triplyetl-416","text":"Release date: 2024-03-07","title":"TriplyETL 4.1.6"},{"location":"triply-etl/generic/changelog/#fixed-shacl-validate-with-sparql-target-returned-incorrect-results","text":"Each shape undergoes conversion to a SHACL Validator object only once during ETL to avoid reloading shapes from disk or string for every record. This approach isn't feasible when SPARQL target nodes alter the model, as it would result in adding those targets for each record.","title":"[Fixed] SHACL validate() with SPARQL target returned incorrect results"},{"location":"triply-etl/generic/changelog/#triplyetl-412-through-415","text":"Release date: 2024-03-01","title":"TriplyETL 4.1.2 through 4.1.5"},{"location":"triply-etl/generic/changelog/#enhanced-improved-the-timeouts-handling-for-fromoai-extractor","text":"This enhancement resolves timeout errors that occurred with requests taking an extended period to respond. By utilizing a custom Fetch Agent from undici package, we've eliminated internal timeouts.","title":"[Enhanced] Improved the timeouts handling for fromOai() extractor"},{"location":"triply-etl/generic/changelog/#triplyetl-411","text":"Release date: 2024-02-18","title":"TriplyETL 4.1.1"},{"location":"triply-etl/generic/changelog/#changed-executerules-supports-only-two-arguments","text":"To set a maximum number of iterations of the execution of the SHACL rules the maxIterations or errorOnMaxIterations needs to be specified in the executeRules() function.","title":"[Changed] executeRules() supports only two arguments"},{"location":"triply-etl/generic/changelog/#enhanced-increased-stack-size","text":"Maximum call stack size has increased to load the large datasets without trowing an errors.","title":"[Enhanced] Increased stack size"},{"location":"triply-etl/generic/changelog/#added-cli-flag-keep-tmp-dir-to-save-temporary-data-directory","text":"Introduced the cli flag --keep-tmp-dir in order to store all temporary files disregarding the completion status. 
This allows the user to debug ETL's by studying the intermediate files the ETL has created.","title":"[Added] CLI flag --keep-tmp-dir to save temporary data directory"},{"location":"triply-etl/generic/changelog/#triplyetl-400","text":"Release date: 2024-01-29","title":"TriplyETL 4.0.0"},{"location":"triply-etl/generic/changelog/#changed-iri-related-middlewares-no-longer-use-skolem-urls","text":"The following middlewares: addHashedIri() , addIri() , addRandomIri() , would no longer allow users to create URLs that have pathnames start with \"/.well-known/genid/\", since they would be consideres skolemised URLs.","title":"[Changed] IRI-related middlewares no longer use skolem URLs"},{"location":"triply-etl/generic/changelog/#changed-fromshapefile-is-now-called-fromshapefile","text":"The format is called ESRI Shapefile, hence our extractor function's name had to be changed from fromShapeFile() to fromShapefile() .","title":"[Changed] fromShapeFile() is now called fromShapefile()"},{"location":"triply-etl/generic/changelog/#removed-function-addrandomiri","text":"Since function addRandomIri() does not add anything beyond addSkolemIri() , the function has been removed from the TriplyETL library. Random IRIs should be skolem IRIs that can be readily replaced by blank nodes.","title":"[Removed] Function addRandomIri()"},{"location":"triply-etl/generic/changelog/#added-new-variables-added-to-etl","text":"New flag has been introduced when constructing an ETL: /** * Timeout ETL after set duration in milliseconds */ timeout: number; /** * If set to TRUE, the ETL will do a hard exit, preventing uploads to TDB on timeouts */ exitOnTimeout: boolean; which can be set as following: const etl = new Etl({timeout: 1000, exitOnTimeout: true}) This will cause a hard exit when a timeout occurs and nothing will be executed after this timeout.","title":"[Added] New variables added to ETL"},{"location":"triply-etl/generic/changelog/#triplyetl-310-311","text":"Release date: 2024-01-15 && 2024-01-17","title":"TriplyETL 3.1.0 && 3.1.1"},{"location":"triply-etl/generic/changelog/#deprecated-deprecated-fromshapefile-for-fromshapefile","text":"","title":"[Deprecated] Deprecated fromShapeFile() for fromShapefile()"},{"location":"triply-etl/generic/changelog/#deprecated-deprecated-addrandomiri-function","text":"Function addRandomIri() does not add anything beyond addSkolemIri() . Random IRIs should be skolem IRIs that can be readily replaced by blank nodes.","title":"[Deprecated] Deprecated addRandomIri() function."},{"location":"triply-etl/generic/changelog/#enhanced-improved-shacl-report","text":"When a SHACL shape is used to validate data does by itself not conform to the SHACL-SHACL shape, the report of that non-conforming shape is now printed.","title":"[Enhanced] Improved SHACL report."},{"location":"triply-etl/generic/changelog/#enhanced-improved-objects-function","text":"The objects() middleware now requires a minimum of 2 objects, deviating from its previous behavior, which was limited to functionality similar to the triple() function.","title":"[Enhanced] Improved objects() function"},{"location":"triply-etl/generic/changelog/#enhanced-rml-middleware","text":"RML map() middleware now allows a string Source and a string primitive as input.","title":"[Enhanced] RML middleware"},{"location":"triply-etl/generic/changelog/#enhanced-static-vocabularies","text":"With the latest update, TriplyETL vocabularies are now represented as Vocabulary objects, replacing the previous usage of objects with the type IRI . 
This change may necessitate adjustments to existing ETLs that utilize static vocabularies, such as aat . In this case, the vocabulary would need to be updated to aat.toIri() to ensure compatibility with the correct type.","title":"[Enhanced] Static vocabularies"},{"location":"triply-etl/generic/changelog/#enhanced-npm-packages","text":"All NPM packages are up to date with their latest version.","title":"[Enhanced] NPM packages"},{"location":"triply-etl/generic/changelog/#fixed-base-iri-when-using-loadrdf","text":"There were some inconsistency between the expected base IRI. For example, the following snippet: import { logQuads } from '@triplyetl/etl/debug' import { Etl, loadRdf, Source } from '@triplyetl/etl/generic' export default async function (): Promise { const etl = new Etl() etl.use( loadRdf(Source.string('

.')), logQuads(), ) return etl } would result in: { } rather than: { } This issue has been fixed.","title":"[Fixed] Base IRI when using loadRdf()"},{"location":"triply-etl/generic/changelog/#fixed-string-encoding-for-iris","text":"It is now possible to check whether a value of a key used to create an IRI contains valid characters. A previous warning incorrectly flagged a space (' ') as an invalid character in the IRI, but that has been taken care of that. Now, when you run the script, you won't encounter the misleading warning, providing a more accurate and hassle-free execution. In this case, [1] is resulting in [2] instead of invalid [3]: [1] a b [2] http://ex.com/a%20b [3] http://ex.com/ a b As well as [4] being encoded as [5]: [4] a&b [5] a&b Or [6] can be legitimately encoded in CSV using [7]: [6] a,b [7] \"a,b\"","title":"[Fixed] String encoding for IRIs"},{"location":"triply-etl/generic/changelog/#fixed-new-datatype-added-to-addpoint-middleware","text":"Datatype wktLiteral has been added to the addPoint() middleware.","title":"[Fixed] New datatype added to addPoint() middleware"},{"location":"triply-etl/generic/changelog/#triplyetl-3020","text":"Release date: 2024-01-04","title":"TriplyETL 3.0.20"},{"location":"triply-etl/generic/changelog/#enhanced-improved-copysource-function","text":"Function etl.copySource() accepts the same destination format as toTriplyDB(), so that the same destination does not need to be specified twice.","title":"[Enhanced] Improved copySource() function"},{"location":"triply-etl/generic/changelog/#enhanced-prefix-uploading","text":"Prefixes are no longer uploaded by default, only explicit prefixes that are defined when constructing an ETL with new Etl({ prefixes }) .","title":"[Enhanced] Prefix uploading"},{"location":"triply-etl/generic/changelog/#triplyetl-3015-through-3018","text":"Release date: 2023-12-07 through 2023-12-28","title":"TriplyETL 3.0.15 through 3.0.18"},{"location":"triply-etl/generic/changelog/#enhanced-rdf-compression-before-upload","text":"It is now possible to enable compression of RDF data before being uploaded to TriplyDB. See the toRdf() function for more information.","title":"[Enhanced] RDF compression before upload"},{"location":"triply-etl/generic/changelog/#enhanced-skolem-iri-prefix-use","text":"TriplyETL now emits an error when a Skolem IRI prefix is used with addHashedIri() .","title":"[Enhanced] Skolem IRI prefix use"},{"location":"triply-etl/generic/changelog/#bug-fixes_1","text":"This release provides bug fixes to XSLT support (see XSLT Transformations and XSLT Assertions ).","title":"Bug fixes"},{"location":"triply-etl/generic/changelog/#triplyetl-3014","text":"Release date: 2023-12-04","title":"TriplyETL 3.0.14"},{"location":"triply-etl/generic/changelog/#added-support-for-rml","text":"This release introduces support for the RML transformation and assertion language. RML is an ETL configuration language that has gained traction in the linked data community over the last couple of years. 
See the following pages for more information: RML Transformations RML Assertions","title":"[Added] Support for RML"},{"location":"triply-etl/generic/changelog/#build-environments-beyond-the-dtap-defaults","text":"It is now possible to extend the standard environments offered by TriplyETL.","title":"[Build] Environments beyond the DTAP defaults"},{"location":"triply-etl/generic/changelog/#bug-fixes_2","text":"This release fixes a URL/request-related error in the fromOai extractor.","title":"Bug fixes"},{"location":"triply-etl/generic/changelog/#triplyetl-307-through-309","text":"Release date: 2023-11-29","title":"TriplyETL 3.0.7 through 3.0.9"},{"location":"triply-etl/generic/changelog/#added-cli-flag-to-skip-version-check","text":"Introduced the cli flag --skip-version-check because some users can not use remote connections because of security policies.","title":"[Added] CLI flag to skip version check"},{"location":"triply-etl/generic/changelog/#added-support-for-jpath-expressions","text":"toJson() middleware now uses path selectors just as fromXml() , but also JPath expressions.","title":"[Added] Support for JPath expressions"},{"location":"triply-etl/generic/changelog/#added-authentication-for-the-oai-pmh-extractor","text":"fromOai() now accepts a Request object as the value for the url option, allowing more fine graded use of the HTTP request (including authentication information).","title":"[Added] Authentication for the OAI-PMH extractor"},{"location":"triply-etl/generic/changelog/#added-xslt-support-for-the-oai-pmh-extractor","text":"fromOai() now accepts an optional parameter stylesheet . The argument must be an object with 1 required key source pointing to the source of the stylesheet and 1 optional argument yieldsRdf . When provided the XSL Stylesheet source is processed using the OAI response. The result of the transformation must still be a valid OAI response.","title":"[Added] XSLT support for the OAI-PMH extractor"},{"location":"triply-etl/generic/changelog/#bug-fixes_3","text":"Fixes issue where keys that are used for internal administration were shown in logged records, after using the ifElse() and switch() control structures.","title":"Bug fixes"},{"location":"triply-etl/generic/changelog/#triplyetl-306","text":"Release date: 2023-11-14","title":"TriplyETL 3.0.6"},{"location":"triply-etl/generic/changelog/#bug-fixes_4","text":"This release fixes issues with upstream packages that contained breaking changes.","title":"Bug fixes"},{"location":"triply-etl/generic/changelog/#triplyetl-305","text":"Release date: 2023-11-09","title":"TriplyETL 3.0.5"},{"location":"triply-etl/generic/changelog/#bug-fixes_5","text":"The progress bar would sometimes terminate at 99% or 101%, instead of the expected 100%.","title":"Bug fixes"},{"location":"triply-etl/generic/changelog/#triplyetl-304","text":"Release date: 2023-11-07","title":"TriplyETL 3.0.4"},{"location":"triply-etl/generic/changelog/#added-dataset-metadata-specification","text":"It is now possible to use metadata when creating datasets. If a new dataset is created then the metadata is always used, for existing datasets you can choose to ignore, merge or replace existing metadata. This feature is to prevent ETLs accidentally overwriting metadata might have changed in the UI/Dashboard of their TriplyDB instance. 
The following options are available for this new feature in the opts.existingMetadata parameter: ignore : no metadata will be changed even if no metadata is present for this dataset (this is the default value) merge : only properties that have no value will be overwritten bij the provided metadata replace : all existing metadata will be replaced, even if the provided metadata contains empty keys","title":"[Added] Dataset metadata specification"},{"location":"triply-etl/generic/changelog/#cli-reverse-logic-for-creating-error-traces","text":"Before this release, running an ETL would always create an error trace file. It was possible to disable this behavior with CLI flag --skip-error-trace . Starting in this release, the error trace file is no longer created by default, and a newly added CLI flag --create-error-trace must now be specified in order ot create the error trace file.","title":"[CLI] Reverse logic for creating error traces"},{"location":"triply-etl/generic/changelog/#bug-fixes_6","text":"The following bugs were fixed: The number of synchronized services was not always reported correctly in CLI output. A package we depend on introduced a breaking change causing a function not to be there anymore.","title":"Bug fixes"},{"location":"triply-etl/generic/changelog/#triplyetl-303","text":"Release date: 2023-11-01","title":"TriplyETL 3.0.3"},{"location":"triply-etl/generic/changelog/#changed-support-for-the-nde-dataset-register","text":"The code to submit datasets to the NDE Dataset Register has been moved to TriplyDB-JS. The way to publish a dataset now is to add an option to the toTriplyDb() function: { submitToNDEDatasetRegister: true } . Example: toTriplyDb({dataset: 'nde', opts: {submitToNDEDatasetRegister: true}})","title":"[Changed] Support for the NDE Dataset Register"},{"location":"triply-etl/generic/changelog/#triplyetl-302","text":"Release date: 2023-10-23","title":"TriplyETL 3.0.2"},{"location":"triply-etl/generic/changelog/#added-static-statement-assertion","text":"export default async function(): Promise { const etl = new Etl({baseIri: Iri('https://example.com/')}) await etl.staticAssertions( pairs( iri(etl.standardGraphs.default), [a, dcat.Dataset], [skos.prefLabel, literal(str(\"Family Doe\"), lang.en)], [dct.created, literal(str(new Date().toISOString()), xsd.dateTime)], ), ); await etl.staticAssertions( pairs(iri(etl.standardGraphs.default), [skos.prefLabel, literal(str(\"Familie Doe\"), lang.nl)]), ); etl.use( fromJson([{ name: \"John Doe\" }, { name: \"Jane Doe\" }]), triple(iri(etl.standardPrefixes.id, \"$recordId\"), sdo.name, \"name\"), logQuads(), ); return etl } You can now assert so called \"static triples\": triples that are not related to the source extractors but should only be asserted once per ETL.","title":"[Added] Static statement assertion"},{"location":"triply-etl/generic/changelog/#bug-fixes_7","text":"There was an error in the ifElse() control structure, that caused ETLs to not use the fallback 'else' block in some situations.","title":"Bug fixes"},{"location":"triply-etl/generic/changelog/#triplyetl-301","text":"Release date: 2023-10-19","title":"TriplyETL 3.0.1"},{"location":"triply-etl/generic/changelog/#enhanced-source-string-validation","text":"The addLiteral() function can now validate string data that occurs in the Record. Such validation can be used in addition to validation in the Internal Store (graph comparison and SHACL validation). 
Look at the documentation of addLiteral() for more information.","title":"[Enhanced] Source string validation"},{"location":"triply-etl/generic/changelog/#enhanced-synchronize-specific-services","text":"When publishing linked data to TriplyDB, it is now possible to synchronize one specific service. This is specifically useful in case an Acceptance and a Production service are used, and only the former should be synchronized. See the documentation for publishing to remote data destinations for more information.","title":"[Enhanced] Synchronize specific services"},{"location":"triply-etl/generic/changelog/#fixed-bug-fixes","text":"The following bugs have been fixed: The progress bar would sometimes go over 100%. the error report file ( etl.err ) would sometimes contain sentinel keys like $sentinel-${MD5-HASH} . These sentinel keys are used for internal bookkeeping in TriplyETL, and are no longer part of the Record. Some XSLT transformations failed on Windows, because of incorrect redirecting of error messages.","title":"[Fixed] Bug fixes"},{"location":"triply-etl/generic/changelog/#triplyetl-300","text":"Release date: 2023-10-12","title":"TriplyETL 3.0.0"},{"location":"triply-etl/generic/changelog/#added-support-for-xslt","text":"Support was added for the Extensible Stylesheet Language Transformations (XSLT) configuration language. This can be used in the following two functions: The fromXml() extractor function, for XML sources that transformed to regular XML. The loadRdf() function, for XML sources that are transformed into RDF/XML. In both cases, this functionality is used by configuring the stylesheet parameter with an XSLT Stylesheet (e.g. using Source.file() ). Example code that uses fromXml() : fromXml(Source.file(XMLFile), { selectors: \"rdf:RDF.sdo:Person\", stylesheet: Source.file(XSLTStylesheet), }), Example code that uses loadRdf() : loadRdf(Source.file(XMLFile), { contentType: \"application/rdf+xml\", stylesheet: Source.file(XSLTStylesheet), }),","title":"[Added] Support for XSLT"},{"location":"triply-etl/generic/changelog/#added-support-for-the-sparql-select-and-sparql-ask-queries","text":"The extractors fromCsv() , fromJson() , fromTsv() and fromXml() now support SPARQL Select queries. The extractors fromJson() and fromXml() also support SPARQL Ask queries. The example below hows how to use a SPARQL ask query in the fromJson() extractor: fromJson(Source.TriplyDb.query('account', 'query-name', { triplyDb: { url: 'https://api.triplydb.com' } }))","title":"[Added] Support for the SPARQL Select and SPARQL Ask queries"},{"location":"triply-etl/generic/changelog/#enhanced-simplified-usage-of-nestedpairs","text":"The nestedPairs() middleware can be used without providing the subject node that connects the pairs to the object/predicate. This will automatically create a Skolem IRI for the subject: nestedPairs(S, P, [a, sdo.Person]) For example: fromJson([{ id: '1', height: 15 }]), addSkolemIri({ prefix: prefix.skolem, key: '_height', }), nestedPairs(iri(prefix.product, 'id'), sdo.height, [qudt.unit, unit.CentiM], [rdf.value, 'height'], ), Will result in the following linked data assertions: product:1 sdo:height [ qudt:unit unit:CentiM; rdf:value 15 ].","title":"[Enhanced] Simplified usage of 'nestedPairs()'"},{"location":"triply-etl/generic/changelog/#changed-automatic-prefix-handling-in-triplydb-using-tordf","text":"Manually specified and standard prefixes are automatically added to TriplyDB when toRdf() is used. 
The middleware uploadPrefixes() is removed.","title":"[Changed] Automatic prefix handling in TriplyDB using 'toRdf()'"},{"location":"triply-etl/generic/changelog/#changed-new-approach-to-prefix-handling-in-triplyetl","text":"Prefixes are no longer defined as function that concatenates a value to an Iri. The Iri is a new type of Object in TriplyETL, that has a concat() method which allows you to add a value to the first part of an Iri. For example: const baseIri = Iri('https://example.com/') const prefixId = baseIri.concat('id/') const johnDoe = prefixId.concat('john-doe')","title":"[Changed] New approach to prefix handling in TriplyETL"},{"location":"triply-etl/generic/changelog/#changed-new-package-triplyetlvocabularies","text":"The vocabularies and languages are no longer part of @triplyetl/etl package. A new module has been released: @triplyetl/vocabularies : Individual vocabularies like rdf and schema.org can be imported in the following way: import { a, rdf, sdo } from '@triplyetl/vocabularies' To import all vocabularies, use: import * as vocab from \"@triplyetl/vocabularies\" Some vocabularies are too large to include, but they can still be used like this: import { aat } from '@triplyetl/vocabularies' const moustache = aat.concat('300379271') or import { aat } from '@triplyetl/vocabularies' addIri({prefix: aat, content: str('300379271'), key: 'moustache'}) To use the RATT lang tools : Import languages : import { languages } from '@triplyetl/vocabularies' Import languages and region : import { region, language } from '@triplyetl/vocabularies' const nl_BE = language.nl.addRegion(region.BE)","title":"[Changed] New package '@triplyetl/vocabularies'"},{"location":"triply-etl/generic/changelog/#changed-rdf-serialization-parsing-with-loadrdf","text":"The loadRdf() function is able to parse known RDF serializations ( Turtle , TriG , N-Triples , N-Quads ) provided as a string without specifying mimetype. const data = Source.string('...') loadRdf(data)","title":"[Changed] RDF serialization parsing with 'loadRdf()'"},{"location":"triply-etl/generic/changelog/#changed-extended-log-and-terminal-output-for-etl-debugging","text":"The output of the logfile and terminal output is changed. It contains more information to help users debugging ETL's. The format of time representation is now H:i:s.u where: H : 24-hour format of an hour with leading zeros (00 through 23) i : Minutes with leading zeros (00 to 59) s : Seconds with leading zeros (00 through 59) u : Microseconds (example: 654321)","title":"[Changed] Extended log and terminal output for ETL debugging"},{"location":"triply-etl/generic/changelog/#changed-tordf-for-account-based-token-access","text":"The toRdf() middleware now accepts \"me\" as account name based on the token. Below are some examples of this being used. toTriplyDb({account: \"me\", dataset: \"myDataset\"}) loadRdf(Source.TriplyDb.rdf(\"me\", datasetName)) Destination.TriplyDb.rdf(\"me\", datasetName)","title":"[Changed] 'toRdf()' for account-based token access"},{"location":"triply-etl/generic/changelog/#changed-relocation-middleware-resetstore-and-randomkey","text":"The resetStore() middleware is now moved from ratt to the generic namespace . The randomKey() middleware moved from generic to ratt .","title":"[Changed] Relocation middleware: 'resetStore()' and 'randomKey()'"},{"location":"triply-etl/generic/changelog/#changed-record-selection-with-offset-and-limit","text":"You can now use --offset and --limit instead of --from-record-id and --head , e.g. LIMIT=1 OFFSET=8 npx etl . 
The old arguments can still be used for backwards compatibility.","title":"[Changed] Record selection with '--offset' and '--limit'"},{"location":"triply-etl/generic/changelog/#changed-removal-of-mapquads","text":"The mapQuads() function was removed.","title":"[Changed] Removal of 'mapQuads()'"},{"location":"triply-etl/generic/changelog/#changed-warning-for-old-nodejs-versions","text":"If the users Node.JS version is older that the recommended version (currently >=18.0.0) a warning is shown.","title":"[Changed] Warning for old Node.JS versions"},{"location":"triply-etl/generic/changelog/#changed-shacl-validation-engine","text":"A SHACL Validation Engine improved performance.","title":"[Changed] SHACL Validation Engine"},{"location":"triply-etl/generic/changelog/#changed-trace-for-large-records","text":"A new flag now bypasses generating the trace for very large records: ---skip-error-trace . Thus, no trace file is created.","title":"[Changed] Trace for large records"},{"location":"triply-etl/generic/changelog/#changed-transition-to-in-memory-engine-speedy","text":"Comunica is no longer part of TriplyETL, the in-memory engine is now Triply's Speedy.","title":"[Changed] Transition to in-memory engine Speedy"},{"location":"triply-etl/generic/changelog/#enhanced-improvements-to-etl-logs","text":"The logging format was improved by including the following information: the TriplyETL version the Node.js version the DTAP mode the start date and time the end date and time","title":"[Enhanced] Improvements to ETL logs"},{"location":"triply-etl/generic/changelog/#enhanced-prevent-using-multiple-extractors","text":"TriplyETL only supports one extractor per ETL configuration object. In the past, it was possible to use multiple extractors, which would result in faulty behavior during ETL runs. Starting in this release, TriplyETL will emit an error when multiple extractors are used.","title":"[Enhanced] Prevent using multiple extractors"},{"location":"triply-etl/generic/changelog/#enhanced-better-error-reporting-for-csv-tsv-and-xml-sources","text":"In previous releases, the extractor functions fromCsv() , fromTsv() , and fromXml() would not emit the file name in case an error occurred. This was specifically problematic when a large number of data source files were used. Starting in this release, the file name is included in error message.","title":"[Enhanced] Better error reporting for CSV, TSV, and XML sources."},{"location":"triply-etl/generic/changelog/#enhanced-default-crs-for-wktaddpoint","text":"In previous releases, the Coordinate Reference System (CRS) was a required attribute for transformation function wkt.addPoint() . Starting in this release, the CRS argument has become optional. When not specified, the default CRS http://www.opengis.net/def/crs/OGC/1.3/CRS84 is used.","title":"[Enhanced] Default CRS for 'wkt.addPoint()'"},{"location":"triply-etl/generic/changelog/#enhanced-handle-conflicting-triplydb-instance-specifications","text":"In previous releases, it was possible to introduce an ambiguity in specify the TriplyDB instance to publish data to. This was possible by (1) specifying a TriplyDB API Token in the environment (e.g. though an .env file), and (2) by configuring the triplyDb option in the loadRdf() function. 
Starting in this release, TriplyETL will emit an error if the TriplyDB instance in the API Token differs from the TriplyDB instance configured in the triplyDb option.","title":"[Enhanced] Handle conflicting TriplyDB instance specifications"},{"location":"triply-etl/generic/changelog/#enhanced-more-information-for-failing-http-calls","text":"In previous releases, when a failing HTTP call resulted in an error message, only the body of that HTTP call would be included in the error message. Starting in this release, the HTTP status code of the failing HTTP call is included in the error message as well.","title":"[Enhanced] More information for failing HTTP calls"},{"location":"triply-etl/generic/changelog/#bug-fixes_8","text":"This release fixes several out-of-memory bugs in the SHACL validation function .","title":"Bug fixes"},{"location":"triply-etl/generic/changelog/#triplyetl-207-through-2019","text":"Release dates: 2023-06-17 through 2023-09-29","title":"TriplyETL 2.0.7 through 2.0.19"},{"location":"triply-etl/generic/changelog/#bug-fixes_9","text":"The following bugs were fixed: Processing an Excel sheet with fromXml() would sometimes consume too much memory. Several installation issues on Windows have been resolved. The async-saxophone library for XML processing was adjusted to support the current LTS version of Node.js (v18).","title":"Bug fixes"},{"location":"triply-etl/generic/changelog/#triplyetl-206","text":"Release date: 2023-06-07","title":"TriplyETL 2.0.6"},{"location":"triply-etl/generic/changelog/#added-support-for-the-premis-vocabulary","text":"Support was added for the PREMIS 3.0.0 vocabulary. This vocabulary is published by the Library of Congress and can be used to publish metadata about the preservation of digital objects. See the PREMIS documentation for more information. The vocabulary can be imported from the 'vocab' module: import { premis } from '@triplyetl/vocabularies' See the documentation on external vocabulary declarations for more information.","title":"[Added] Support for the PREMIS vocabulary"},{"location":"triply-etl/generic/changelog/#added-new-debug-function-logmemory","text":"A new debug function logMemory() is added. This function prints an overview of the current memory usage of TriplyETL. This allows users to detect fluctuations in memory consumption inside their pipelines.","title":"[Added] New debug function logMemory()"},{"location":"triply-etl/generic/changelog/#added-support-for-the-listidentifiers-verb-in-the-oai-pmh-extractor","text":"The fromOai() extractor already supported the ListRecords verb. This release adds support for the ListIdentifiers verb as well. This new verb allows users to stream through the headers of all records in an OAI-PMH collection, without requiring the full record (i.e. body) to be retrieved.","title":"[Added] Support for the 'ListIdentifiers' verb in the OAI-PMH extractor"},{"location":"triply-etl/generic/changelog/#triplyetl-205","text":"Release date: 2023-05-25","title":"TriplyETL 2.0.5"},{"location":"triply-etl/generic/changelog/#changed-new-default-engine-for-sparql-construct","text":"The default engine for evaluating SPARQL Construct queries (function construct() ) has changed from Comunica to Speedy. Speedy is a new SPARQL engine that is developed by Triply. Comunica is an open source engine that is developed by the open source community. Since SPARQL is a standardized query language, this change should not cause a difference in behavior for your ETL pipelines. 
In the unexpected case where an ETL pipeline is negatively affected by this change, the old situation can be restored by explicitly configuring the Comunica engine: import { construct } from '@triplyetl/etl/sparql' construct(Source.TriplyDb.query('my-query'), { sparqlEngine: 'comunica' }), The benefit of switching to the Speedy engine is that this engine is expected to be faster for most queries. Overall, this change will therefore result in speed improvements for your TriplyETL pipelines.","title":"[Changed] New default engine for SPARQL Construct"},{"location":"triply-etl/generic/changelog/#added-new-cli-tool-for-comparing-graphs","text":"The new CLI tool compare allows graph comparison to be performed from the command-line. This uses the same algorithm that is used by the compareGraphs() validation function.","title":"[Added] New CLI tool for comparing graphs"},{"location":"triply-etl/generic/changelog/#bug-fixes_10","text":"This release fixes the following bugs: fromXlsx() did not remove trailing whitespace in cell values. When a SHACL result was printed, an incorrect message about a faulty SHACL model was shown. Some RDF processors did not handle empty RDF inputs correctly.","title":"Bug fixes"},{"location":"triply-etl/generic/changelog/#triplyetl-204","text":"Release date: 2023-05-11","title":"TriplyETL 2.0.4"},{"location":"triply-etl/generic/changelog/#enhanced-better-output-for-graph-comparison","text":"Before this release, when two graphs were not isomorph and their difference consisted of a mapping from blank nodes onto blank nodes exclusively, an empty difference message was communicated. From this release onwards, the difference message is non-empty, and specifically indicates the difference between the non-isomorphic graphs in terms of the mismatching blank nodes. Look at this example from the graph comparison documentation, which emits such a difference message.","title":"[Enhanced] Better output for graph comparison"},{"location":"triply-etl/generic/changelog/#triplyetl-203","text":"Release date: 2023-05-10","title":"TriplyETL 2.0.3"},{"location":"triply-etl/generic/changelog/#bug-fixes_11","text":"This release includes the following bug fixes: Error location information is not shown in TriplyETL Runner. Issue when a URL data source ( Source.url() ) includes an HTTP body.","title":"Bug fixes"},{"location":"triply-etl/generic/changelog/#triplyetl-202","text":"Release date: 2023-05-09","title":"TriplyETL 2.0.2"},{"location":"triply-etl/generic/changelog/#bug-fixes_12","text":"This release fixes bugs related to the recent switch from CommonJS to ESM: Dynamic import bug on Windows. Error reporting issues due to ESM imports.","title":"Bug fixes"},{"location":"triply-etl/generic/changelog/#triplyetl-201","text":"Release date: 2023-05-03","title":"TriplyETL 2.0.1"},{"location":"triply-etl/generic/changelog/#added-timeout-flag-for-triplyetl-runner","text":"The TriplyETL Runner is the CLI tool that is used to run ETL pipelines. Starting with this version, you can specify a --timeout flag when using the TriplyETL Runner. When the indicated timeout is reached before the pipeline finishes, the TriplyETL Runner will gracefully terminate the ETL by acting as if there are no more incoming records. 
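A minimal invocation sketch (the duration value is an illustrative example; the flag accepts human-readable duration strings such as '1h 30m 5s' or '1hr'):

```sh
# Gracefully stop the pipeline after at most 1 hour and 30 minutes.
# Linked data produced up to that point is still uploaded.
npx etl lib/main.js --timeout '1h 30m'
```
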
See the TriplyETL Runner documentation page for more information.","title":"[Added] Timeout flag for TriplyETL Runner"},{"location":"triply-etl/generic/changelog/#triplyetl-200","text":"Release date: 2023-05-01","title":"TriplyETL 2.0.0"},{"location":"triply-etl/generic/changelog/#changed-modules-infrastructure-moves-from-commonjs-to-esm","text":"Before this release, TriplyETL used CommonJS modules to modularize its functionality into different components. Starting in this release, ECMAScript Modules (ESM) are used to modularize TriplyETL functionality into different modules. ESM is a more modern approach for modularizing ECMAScript (JavaScript, TypeScript, and Node.js) code. While CommonJS imports are evaluated at runtime, ESM imports are evaluated at compile time. TriplyETL users benefit from this change, since error messages related to module imports will be detected much earlier in the development process. All documentation examples were update to use ESM syntax for module imports, for example: import { logRecord } from '@triplyetl/etl/debug'","title":"[Changed] Modules infrastructure moves from CommonJS to ESM"},{"location":"triply-etl/generic/changelog/#changed-debug-functions-move-to-a-new-module","text":"Before this release, debug functions like logRecord() and startTrace() were part of the RATT module. Since debug functions can be used in combination with any ETL configuration approach, they were moved to a new module. The debug functions are imported from their new module in the following way: import { logRecord, traceEnd, traceStart } from '@triplyetl/etl/debug'","title":"[Changed] Debug functions move to a new module"},{"location":"triply-etl/generic/changelog/#enhanced-better-error-messages-when-things-go-wrong","text":"This release introduces a new approach for communicating errors back to the user. When TriplyETL functionality detects an error condition, a unified 'trace middleware' is now used to retrieve information from the environment in which the error occurred. This information is then printed to the error output stream for communication with the user.","title":"[Enhanced] Better error messages when things go wrong"},{"location":"triply-etl/generic/changelog/#bug-fixes_13","text":"The following bug fixes are included in this release: Incorrect behavior of the _switch() control function . The fromOai() extractor now communicates clearer when the accessed OAI-PMH endpoint encounters any issues. When a key with a NULL value was accessed, the name of that key is now included in the error message. This makes it easier for users to find the NULL value in their source data.","title":"Bug fixes"},{"location":"triply-etl/generic/changelog/#triplyetl-10x","text":"TriplyETL 1.0.0 was released on 2023-03-20.","title":"TriplyETL 1.0.x"},{"location":"triply-etl/generic/cli/","text":"On this page: Command Line Interface (CLI) Installing dependencies Transpiling to JavaScript TriplyETL Runner Output summary Limit the number of records Specify a range of records Process a specific record Set a timeout Verbose mode Secure verbose mode TriplyETL Tools Compare Create TriplyDB API Token Print TriplyDB API Token Validate Command Line Interface (CLI) \u00b6 TriplyETL allows you to manually perform various tasks in a terminal application (a Command-Line Interface or CLI). Installing dependencies must be repeated when dependencies were changed. Transpiling to JavaScript must be repeated when one or more TypeScript files are changed. 
TriplyETL Runner allows you to manually run local TriplyETL projects in your terminal. TriplyETL Tools explains how you can perform common ETL tasks. Installing dependencies \u00b6 When you work on an existing TriplyETL project, you sometimes pull in changes made by your team members. Such changes are typically obtained by running the following Git command: git pull This command prints a list of files that were changed by your team members. If this list includes changes to the file package.json , this means that one or more dependencies were changed. In order to effectuate these changes in your local copy of the TriplyETL project, you must run the following command: npm i Transpiling to JavaScript \u00b6 When you make changes to one or more TypeScript files, the corresponding JavaScript files will have become outdated. If you now use the TriplyETL Runner , it will use one or more outdated JavaScript files, and will not take into account your most recent changes to the TypeScript files. In order to keep your JavaScript files up-to-date relative to your TypeScript files, you must run the following command after making changes to TypeScript files: npm run build If you edit your TypeScript files repeatedly, having to run this extra command may get tedious. In such cases, you can run the following command to automatically perform the transpile step in the background: npm run dev Notice that this prevents you from using the terminal application for new commands. It is typical to open a new terminal application window, and run the npx etl command from there. TriplyETL Runner \u00b6 The TriplyETL Runner allows you to run a local TriplyETL project in your terminal application. We assume that you have a local TriplyETL project in which you can successfully run the npx etl command. Follow the Getting Started instructions for TriplyETL Runner if this is not yet the case. Run the following command to run the ETL pipeline: npx etl This command implicitly uses the file lib/main.js , which is the transpiled JavaScript file that corresponds to the TypeScript file src/main.ts . The following command has the same behavior, but makes explicit which file is used: npx etl lib/main.js Some TriplyETL projects have multiple top-level scripts. In such cases, it is possible to run each of these scripts individually as follows: npx etl lib/some-script.js Output summary \u00b6 TriplyETL Runner will start processing data. Depending on the size of the data source, the Runner may take more or less time to finish. 
When the Runner finishes successfully, it will print the following summary: \u250c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2510 \u2502 Etl: #Error 0 | #Warning 0 | #Info 0 \u2502 \u2502 #Statements 2 \u2502 \u2502 #Records 2 \u2502 \u2502 Started at 2023-06-18 10:05:20 \u2502 \u2502 Runtime 0 sec \u2502 \u2514\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2518 This summary includes the following information: \"#Error\" shows the number of errors encountered. With default settings, this number is at most 1, since the Runner will immediately stop after an error occurs. \"#Warning\" shows the number of warnings encountered. With default settings, this includes warnings emitted by the SHACL Validator . \"#Info\" shows the number of informational messages. With default settings, this includes informational messages emitted by the SHACL Validator . \"#Statements\" shows the number of triples or quads that was generated. This number is equal to or higher than the number of statements that is uploaded to the triple store. The reason for this is that TriplyETL processes records in parallel. If the same statement is generated for two records, the number of statements with be incremented by 2, but only 1 unique statement will be uploaded to the triple store. \"#Records\" shows the number of records that was processed. \"Started at\" shows the date and time at which the Runner started. \"Runtime\" shows the wall time duration of the run. Limit the number of records \u00b6 When developing a pipeline, it is almost never necessary to process all records from the source data. Instead, it is common to run the ETL for a small number of example record, which results in quick feedback. The --head flag indicates the maximum number of records that is processed by the Runner: npx etl --head 1 npx etl --head 10 These commands run the ETL for the first record (if one is available) and for the first 10 records (if these are available). Specify a range of records \u00b6 When developing a pipeline over a large source data collection, it is often standard practice to use the first 10 or 100 records most of the time. The benefit of this approach is that the feedback loop between making changes and receiving feedback is short. A downside of this approach is that the ETL may be overly optimized towards these first few records. For example, if a value is missing in the first 1.000 records, then transformations that are necessary for when the value is present will not be developed initially. An alternative is to run the entire ETL, but that takes a long time. To avoid the downsides of using --head , TriplyETL also supports the --from-record-id flag. This flag specifies the number of records that are skipped. This allows us to specify an arbitrary consecutive range of records. 
For example, the following processes the 1,001st up to and including the 1,010th record: npx etl --from-record-id 1000 --head 10 Process a specific record \u00b6 When the --head flag is set to 1, the --from-record-id flag specifies the index of a single specific record that is processed. This is useful when a record is known to be problematic, for instance during debugging. The following command runs TriplyETL for the 27th record: npx etl --from-record-id 26 --head 1 Set a timeout \u00b6 For large ETL pipelines, it is sometimes useful to specify a maximum duration for which the TriplyETL Runner is allowed to run. In such cases, the --timeout flag can be used. The --timeout option accepts human-readable duration strings, such as '1h 30m 5s', '1hr', '1 hour', or '3hrs'. When the indicated timeout is reached before the pipeline finishes, the TriplyETL Runner will gracefully terminate the ETL by acting as if there are no more incoming records. As a result, the Runner will upload all linked data (graphs) that was produced up to that point, and it will write a performance log. For TriplyETLs that run in a CI/CD environment, the timeout must be set lower than the CI/CD timeout, in order for the Runner to be able to perform the termination step. Verbose mode \u00b6 When TriplyETL is run normally, the following information is displayed: The number of added triples. The runtime of the script. An error message, if any occurred. It is possible to also show the following additional information by specifying the --verbose flag: In case of an error, the first 20 values from the last processed record. In case of an error, the full stack trace. The following example shows how the --verbose flag can be used: npx etl --verbose Secure verbose mode \u00b6 Verbose mode may perform a reset of your current terminal session. If this happens, you lose visible access to the commands that were run prior to the last TriplyETL invocation. This destructive behavior of verbose mode can be disabled by setting the following environment variable: export CI=true This fixes the reset issue, but also makes the output less colorful. TriplyETL Tools \u00b6 TriplyETL Tools is a collection of small tools that can be used to run isolated tasks from your terminal application. TriplyETL Tools can be used when you are inside a TriplyETL project. If you do not have an ETL project yet, use the TriplyETL Generator first to create one. The following command prints an overview of the supported tools: npx tools The following tools are supported: Tool Description compare Compare the contents of two RDF files create-token Create a new TriplyDB API Token print-token Print the currently set TriplyDB API Token, if any validate Validate a data file against a SHACL shapes file For each tool, the following command prints more information on how to use it: npx tools {name} --help Compare \u00b6 The compare tool checks whether two RDF files encode the same linked data: - If the two files contain the same data, the command succeeds and does not print any output. - If the two files do not contain the same data, the command exits with an error code, and the difference between the two files is printed. 
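Because the outcome is communicated through the exit code, the comparison can also be scripted. A minimal shell sketch, assuming two hypothetical files expected.ttl and actual.ttl:

```sh
# Succeeds (exit code 0) when both files encode the same linked data;
# otherwise the difference is printed and a non-zero exit code is returned.
if npx tools compare expected.ttl actual.ttl; then
  echo "Files encode the same linked data"
else
  echo "Files differ; see the printed difference above"
fi
```
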
The compare tool is invoked over the two RDF files one.ttl and two.ttl as follows: npx tools compare one.ttl two.ttl This tool can be used to compare two RDF files that contain multiple graphs, for example: npx tools compare one.trig two.trig This tool uses the graph isomorphism property as defined in the RDF 1.1 standard: link Create TriplyDB API Token \u00b6 This tool creates a new TriplyDB API Token from the command-line. This command can be used as follows: npx tools create-token The command will ask a couple of questions in order to create the TriplyDB API Token: The hostname of the TriplyDB instance The name of the token Your TriplyDB account e-mail Your TriplyDB account password The command exits if a TriplyDB API Token is already configured. Print TriplyDB API Token \u00b6 This tool prints the currently configured TriplyDB API Token, if any. This command can be used as follows: npx tools print-token This command is useful when there are issues with configuring a TriplyDB API Token. Validate \u00b6 This tool validates the content of one data file against the SHACL shapes in another file. The resulting SHACL validation report is printed to standard output. The command can be used as follows: $ npx tools validate -d data.trig -s model.trig See this section to learn more about the SHACL validation report.","title":"Command Line Interface (CLI)"},{"location":"triply-etl/generic/cli/#command-line-interface-cli","text":"TriplyETL allows you to manually perform various tasks in a terminal application (a Command-Line Interface or CLI). Installing dependencies must be repeated when dependencies were changed. Transpiling to JavaScript must be repeated when one or more TypeScript files are changed. 
It is typical to open a new terminal application window, and run the npx etl command from there.","title":"Transpiling to JavaScript"},{"location":"triply-etl/generic/cli/#triplyetl-runner","text":"The TriplyETL Runner allows you to run a local TriplyETL project in your terminal application. We assume that you have a local TriplyETL project in which you can successfully run the npx etl command. Follow the Getting Started instructions for TriplyETL Runner if this is not yet the case. Run the following command to run the ETL pipeline: npx etl This command implicitly uses the file lib/main.js , which is the transpiled JavaScript file that corresponds to the TypeScript file src/main.ts . The following command has the same behavior, but makes explicit which file is used: npx etl lib/main.js Some TriplyETL projects have multiple top-level scripts. In such cases, it is possible to run each of these scripts individually as follows: npx etl lib/some-script.js","title":"TriplyETL Runner"},{"location":"triply-etl/generic/cli/#output-summary","text":"TriplyETL Runner will start processing data. Depending on the size of the data source, the Runner may take more or less time to finish. When the Runner finishes successfully, it will print the following summary: \u250c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2510 \u2502 Etl: #Error 0 | #Warning 0 | #Info 0 \u2502 \u2502 #Statements 2 \u2502 \u2502 #Records 2 \u2502 \u2502 Started at 2023-06-18 10:05:20 \u2502 \u2502 Runtime 0 sec \u2502 \u2514\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2518 This summary includes the following information: \"#Error\" shows the number of errors encountered. With default settings, this number is at most 1, since the Runner will immediately stop after an error occurs. \"#Warning\" shows the number of warnings encountered. With default settings, this includes warnings emitted by the SHACL Validator . \"#Info\" shows the number of informational messages. With default settings, this includes informational messages emitted by the SHACL Validator . \"#Statements\" shows the number of triples or quads that was generated. This number is equal to or higher than the number of statements that is uploaded to the triple store. The reason for this is that TriplyETL processes records in parallel. If the same statement is generated for two records, the number of statements with be incremented by 2, but only 1 unique statement will be uploaded to the triple store. \"#Records\" shows the number of records that was processed. \"Started at\" shows the date and time at which the Runner started. \"Runtime\" shows the wall time duration of the run.","title":"Output summary"},{"location":"triply-etl/generic/cli/#limit-the-number-of-records","text":"When developing a pipeline, it is almost never necessary to process all records from the source data. 
Instead, it is common to run the ETL for a small number of example record, which results in quick feedback. The --head flag indicates the maximum number of records that is processed by the Runner: npx etl --head 1 npx etl --head 10 These commands run the ETL for the first record (if one is available) and for the first 10 records (if these are available).","title":"Limit the number of records"},{"location":"triply-etl/generic/cli/#specify-a-range-of-records","text":"When developing a pipeline over a large source data collection, it is often standard practice to use the first 10 or 100 records most of the time. The benefit of this approach is that the feedback loop between making changes and receiving feedback is short. A downside of this approach is that the ETL may be overly optimized towards these first few records. For example, if a value is missing in the first 1.000 records, then transformations that are necessary for when the value is present will not be developed initially. An alternative is to run the entire ETL, but that takes a long time. To avoid the downsides of using --head , TriplyETL also supports the --from-record-id flag. This flag specifies the number of records that are skipped. This allows us to specify an arbitrary consecutive range of records. For example, the following processes the 1.001-st until and including the 1.010-th record: npx etl --from-record-id 1000 --head 10","title":"Specify a range of records"},{"location":"triply-etl/generic/cli/#process-a-specific-record","text":"When the --head flag is set to 1, the --from-record-id flag specifies the index of a single specific record that is processed. This is useful when a record is known to be problematic, for instance during debugging. The following command runs TriplyETL for the 27th record: npx etl --from-record-id 26 --head 1","title":"Process a specific record"},{"location":"triply-etl/generic/cli/#set-a-timeout","text":"For large ETL pipelines, it is sometimes useful to specify a maximum duration for which the TriplyETL Runner is allowed to run. In such cases, the --timeout flag can be used. The --timeout option accepts human-readable duration strings, such as '1h 30m 5s', '1hr', '1 hour', or '3hrs'. When the indicated timeout is reached before the pipeline finishes, the TriplyETL Runner will gracefully terminate the ETL by acting as if there are no more incoming records. As a result, the Runner will upload all linked data (graphs) that was produced up to that point, and it will write a performance log. For TriplyETLs that run in a CI/CD environment, the timeout must be set lower than the CI/CD timeout, in order for the Runner to be able to perform the termination step.","title":"Set a timeout"},{"location":"triply-etl/generic/cli/#verbose-mode","text":"When TriplyETL is run normally, the following information is displayed: The number of added triples. The runtime of the script. An error message, if any occurred. It is possible to also show the following additional information by specifying the --verbose flag: In case of an error, the first 20 values from the last processed record. In case of an error, the full stack trace. The following example shows how the --verbose flag can be used: npx etl --verbose","title":"Verbose mode"},{"location":"triply-etl/generic/cli/#secure-verbose-mode","text":"Verbose mode may perform a reset of your current terminal session. If this happens you lose visible access to the commands that were run prior to the last TriplyETL invocation. 
This destructive behavior of verbose mode can be disabled by setting the following environment variable: export CI=true This fixes the reset issue, but also makes the output less colorful.","title":"Secure verbose mode"},{"location":"triply-etl/generic/cli/#triplyetl-tools","text":"TriplyETL Tools is a collection of small tools that can be used to run isolated tasks from your terminal application. TriplyETL Tools can be used when you are inside a TriplyETL project. If you do not have an ETL project yet, use the TriplyETL Generator first to create one. The following command prints an overview of the supported tools: npx tools The following tools are supported: Tool Description compare Compare the contents of two RDF files create-token Create a new TriplyDB API Token print-token Print the currently set TriplyDB API Token, if any validate Validate a data file against a SHACL shapes file For each tool, the following command prints more information on how to use it: npx tools {name} --help","title":"TriplyETL Tools"},{"location":"triply-etl/generic/cli/#compare","text":"The compare tool checks whether two RDF files encode the same linked data: - If the two files contain the same data, the command succeeds and does not print any output. - If the two files do not contain the same data, the command exits with an error code, and the difference between the two files is printed. The compare tools is invoked over the two RDF files one.ttl and two.ttl as follows: npx tools compare one.ttl two.ttl This tool can be used to compare two RDF files that contain multiple graphs, for example: npx tools compare one.trig two.trig This tool uses the graph isomorphism property as defined in the RDF 1.1 standard: link","title":"Compare"},{"location":"triply-etl/generic/cli/#create-triplydb-api-token","text":"This tool creates a new TriplyDB API Token from the command-line. This command can be used as follows: npx tools create-token The command will ask a couple of questions in order to create the TriplyDB API Token: The hostname of the TriplyDB instance The name of the token Your TriplyDB account e-mail Your TriplyDB account password The command exists in case a TriplyDB API Token is already configured.","title":"Create TriplyDB API Token"},{"location":"triply-etl/generic/cli/#print-triplydb-api-token","text":"This tool prints the currently configured TriplyDB API Token, if any. This command can be used as follows: npx tools print-token This command is useful when there are issues with configuring a TriplyDB API Token.","title":"Print TriplyDB API Token"},{"location":"triply-etl/generic/cli/#validate","text":"This tool validates the content of one data file against the SHACL shapes in another file. The resulting SHACL validation report is printed to standard output. 
The command can be used as follows: $ npx tools validate -d data.trig -s model.trig See this section to learn more about the SHACL validation report.","title":"Validate"},{"location":"triply-etl/generic/control-structures/","text":"On this page: Control Structures Process data conditionally (when()) Missing values The empty string NULL values (when() and whenNotEqual()) Iterating over lists of objects (forEach()) Index key ($index) Parent key ($parent) Root key ($root) Iterating over lists of primitives Specify multiple conditions (ifElse()) Parameters Example 1 Example 2 Switch between different cases (_switch()) Parameters Example 1 Example 2 Skipping remaining functions (skipRest()) Parameters Control Structures \u00b6 This page documents how you can use control structures in your ETL configuration. Process data conditionally ( when() ) \u00b6 Source data often contains optional values. These are values that appear in some, but not all records. Source data often contains 'special' values that denote the absence of a value. Common examples are values such as 'NULL' or the empty string ( '' ) or 'clear' outliers such as 9999 for a missing year. We call such values \u2018null values\u2019. The when() function allows part of a TriplyETL configuration to run when certain conditions are met. The first parameter is used to determine whether or not the remaining parameters should be called: when('{condition}', '{statement-1}', '{statement-2}', '{statement-3}', // etc ), Notice that it is often useful to specify multiple statements under the same condition: The first statement transforms an optional value, and the second statement uses the transformed optional value to make a triple assertion. The first statement asserts one triple based on the optional value, and the second statement asserts a second triple based on the same optional value. Missing values \u00b6 If a value is sometimes completely missing from a source data record, the when() conditional function can be used. The following code snippet asserts a triple if and only if a value for the 'zipcode' key is present in the Record: when(context => context.isNotEmpty('zipcode'), triple(iri(prefix.id, 'id'), def.zipcode, 'zipcode'), ), Since checking for the presence or absence of a single key is very common, the above can also be written as follows: when('zipcode', triple(iri(prefix.id, 'id'), def.zipcode, 'zipcode'), ), The empty string \u00b6 In many source data formats, the empty string is used to signify a missing value; this particular string is treated in a special way by when() . A key whose value is the empty string is treated in the same way as a key that is altogether absent. The following code snippet will not print the record to standard output, because the 'zipcode' key is considered empty: fromJson([{ zipcode: '' }]), when('zipcode', logRecord(), ), Notice that it is almost never useful to store the empty string in linked data. So the treatment of empty strings as NULL values is the correct default behavior. NULL values ( when() and whenNotEqual() ) \u00b6 If a key contains specific values that are intended to represent NULL values, then these must be specifically identified in the first when() parameter. The following code snippet identifies the value 9999 for the 'created' key as denoting a NULL value. This means that the year 9999 is used in the source system whenever the actual year of creation was unknown. 
when(context => context.getNumber('created') != 9999, triple(iri(prefix.id, 'id'), dct.created, literal('created', xsd.gYear)), ), Since checking the value of one specific key is very common, the above can be written as follows, using the more specific whenNotEqual function: whenNotEqual('created', 9999, triple(iri(prefix.id, 'id'), dct.created, literal('created', xsd.gYear)), ), Notice that the use of whenNotEqual() makes the configuration easier to read. The same shorthand notation works when there are multiple NULL values in the source data. The following code snippet only asserts a triple if the year of creation is neither 9999 nor -1. Notice that the array can contain any number of potential NULL values: whenNotEqual('created', [-1, 9999], triple(iri(prefix.id, 'id'), dct.created, literal('created', xsd.gYear)), ), Iterating over lists of objects ( forEach() ) \u00b6 In the previous section, we saw that we were able to assert the name of the first country and the name of the second country. But what do we do if we want to assert the name for every country in the world? And what do we do if some countries have a name in 2 languages, but other countries have a name in 1 or 3 languages? What we need is a simple way to express that we want to make an assertion for every element in a list. TriplyETL provides the forEach() function for this purpose. The following code snippet asserts the name for each country in the example data: forEach('data.countries', triple(iri(prefix.id, 'id'), rdfs.label, 'name'), ), Notice the following details: - forEach() uses the path expression 'data.countries' to identify the list. - Inside the forEach() function, each element in the list is made available separately. - This allows the 'id' and 'name' keys to be identified directly. The above code snippet makes one assertion for every element in the \"countries\" list: country:nl rdfs:label 'The Netherlands'. country:de rdfs:label 'Germany'. Notice that forEach() only works for lists whose elements are objects*. See Iterating over lists of primitives for dealing with lists that do not contain objects. The elements that forEach() iterates over are themselves (sub)records. This implies that all functions that work for full records also work for the (sub)records inside forEach() . The (sub)records inside an forEach() function are smaller. This allows the regular keys of the iterated-over elements to be accessed directly. In addition to these regular keys, (sub)records inside forEach() also contain additional keys that simplify common operations. The following subsections explain the following special keys: Index key ( $index ) Parent key ( $parent ) Root key ( $root ) Index key ( $index ) \u00b6 Each (sub)record that is made available in forEach() contains the $index key. The value of this key is the index of the element in the list. This is the same index that is used to access specific elements in an list, as explained in the section on accessing lists by index . The index key is often useful for assigning a unique subject IRI to every element. Suppose we have the following source data. We do not want to use the values of the \"name\" key for our subject IRI, because these names contain spaces and possibly other problematic characters that make the IRI more difficult to read and use. 
{ \"countries\": [ { \"name\": \"The Netherlands\" }, { \"name\": \"Germany\" }, { \"name\": \"Italy\" } ] } The following code snippet uses the $index key that is made available inside forEach in order to create a unique subject IRI for each country: forEach('countries', triple(iri(prefix.id, '$index'), rdfs.label, 'name'), ), This results in the following assertions: country:0 rdfs:label 'The Netherlands'. country:1 rdfs:label 'Germany'. country:2 rdfs:label 'Italy'. Parent key ( $parent ) \u00b6 When forEach() iterates through a list of elements, it makes the enclosingparent* record available under key $parent . The parent record is the record that directly contains the first key that appears in the path that was specified in forEach() . For example, the parent record in the following call is the record that directly contains the \"data\" key: forEach('data.countries', // etc ), The $parent key can be observed when logRecord` is used to print the iterated-over elements to the terminal: forEach('data.countries', logRecord(), ), For our example source data, this emits the following 2 records: { \"id\": \"en\", \"name\": \"The Netherlands\", \"$index\": 0, \"$parent\": { \"data\": { \"labels\": [ { \"id\": \"en\", \"name\": \"The Netherlands\", }, { \"id\": \"de\" \"name\": \"Germany\", } ] } }, \"$root\": \"__circular__\" } and: { \"id\": \"de\", \"name\": \"Germany\", \"$index\": 1, \"$parent\": { \"data\": { \"labels\": [ { \"id\": \"en\", \"name\": \"The Netherlands\", }, { \"id\": \"de\" \"name\": \"Germany\", } ] } }, \"$root\": \"__circular__\" } The $root key is explained in the next section . Root key ( $root ) \u00b6 Sometimes it may be necessary to access a part of the original record that is outside of the scope of the forEach() call. Every (sub)record inside a forEach() call contains the \"$root\" key. The value of the root key provides a link to the full record. Because the $root key is part of the linked-to record, it is not possible to print the value of the root key. (This would result in infinite output.) For this reason, the value of the $root key is printed as the special value \"__circular__\" . For the above examples, the parent record and root record are the same, but this is not always the case. Specifically, the parent record and root record are different when forEach() calls are nested. The following data contains an inner list (key \"labels\" ) inside an outer list ( \"countries\" ): { \"data\": { \"countries\": [ { \"id\": \"NL\", \"labels\": [ { \"name\": \"The Netherlands\", \"locale\": \"en-us\" }, { \"name\": \"Nederland\", \"locale\": \"nl-nl\" } ] }, { \"id\": \"EN\", \"labels\": [ { \"name\": \"England\", \"locale\": \"en-gb\" } ] } ] } } The following nested forEach() call shows the difference between the \"$parent\" key and the $root key. The $parent key allows the individual country objects to be accessed, while the \"$root\" key allows the entire tree to be accessed: forEach('data.countries', forEach('labels', logRecord(), ), ), The following record is printed first (3 records are printed in total). Notice that the value of the outer $parent and \"$root\" keys are now different: - The $parent key allows access to the first country. - The $root key allows access to the full record (describing multiple countries). 
{ \"name\": \"The Netherlands\", \"locale\": \"en-us\", \"$index\": 0, \"$parent\": { \"id\": \"NL\", \"labels\": [ { \"name\": \"The Netherlands\", \"locale\": \"en-us\" }, { \"name\": \"Nederland\", \"locale\": \"nl-nl\" } ], \"$index\": 0, \"$parent\": { \"data\": { \"countries\": [ { \"id\": \"NL\", \"labels\": \"__circular__\" }, { \"id\": \"EN\", \"labels\": [ { \"name\": \"England\", \"locale\": \"en-gb\" } ] } ] } }, \"$root\": \"__circular__\" }, \"$root\": \"__circular__\" } Iterating over lists of primitives \u00b6 Notice that forEach() can only iterate over Records. Sometimes, it is necessary to iterate over primitive values, for example strings. If the iteration directly results in RDF assertions, iris() and literals() can be used. But in other cases, we must map the primitive values to objects. These objects can then be processed as regular Records. Here is an example: fromJson({value: 'a b c'}), split({content: 'value', separator: ' ', key: 'valueStrings'}), // Since we cannot transform 'valueStrings' directly, we first create // objects that contain those strings ('valueObjects'). custom.addFrom.value({ content: 'valueStrings', change: values => (values as string[]).map(value => ({'valueObject': value})), type: 'unknown', key: 'valueObjects' }), // Here we can apply any regular transformation that works with Records. Specify multiple conditions ( ifElse() ) \u00b6 The ifElse() function in TriplyETL allows us to specify multiple conditions based on which other functions are run. Every condition is specified with an if key. In case the condition is true, the functions specified in the then key are run. If none of the if conditions are true, the functions specified in an else key, if present, are run. Parameters \u00b6 The first parameter must be an { if: ..., then: ... } object. The non-first parameters are either additional { if: ..., then: ... } objects or a final { else: ... } object. Each if key specifies a condition that is either true or false. Conditions are either a key name or a function that takes the Etl Context and returns a Boolean value. Specifying a key name is identical to specifying the following function: ctx => ctx.getString('KEY') The then and else keys take either one function, or an array of zero or more functions. Example 1 \u00b6 The following code snippet uses different conditions to determine the age category that a person belongs to: fromJson([ { id: 'johndoe', age: 12 }, { id: 'janedoe', age: 32 }, // ... 
]), addIri({ prefix: prefix.person, content: 'id', key: '_person', }), ifElse({ if: ctx => ctx.getNumber('age') < 12, then: triple('_person', a, def.Child), }, { if: ctx => { const age = ctx.getNumber('age') return age >= 12 && age < 20 }, then: triple('_person', a, def.Teenager), }, { if: ctx => { const age = ctx.getNumber('age') return age >= 20 && age < 65 }, then: triple('_person', a, def.Adult), }, { else: triple('_person', a, def.Senior), }), Example 2 \u00b6 The following snippet either asserts data about persons or data about organizations, and uses an ifElse to make the conditional determination on which assertion to make: fromJson([ { first: 'John', last: 'Doe' }, { name: 'Triply' }, ]), ifElse({ if: 'name', then: pairs(iri(prefix.id, 'name'), [a, sdo.Organization], [sdo.name, 'name'], ), }, { else: [ concat({ content: ['first', 'last'], separator: '-', key: 'name', }), pairs(iri(prefix.id, 'name'), [a, sdo.Person], [sdo.givenName, 'first'], [sdo.familyName, 'last'], ), ], }), Switch between different cases ( _switch() ) \u00b6 The function _switch() allows us to switch between different cases, based on the value of a specified key. The signature is as follows; _switch(key, [value_1, functions_1], ..., [value_n, functions_n], default_functions, ) Parameters \u00b6 key The key parameter whose value is compared against the specified values. Each case consists of a list of two elements: value_i is the value that is checked for equivalence with the value stored in key . functions_i is the function or list of functions that is executed when the value in key is equivalent to value_i . default_functions is the function or list of functions that is executed when key matches neither of the cases. Notice that we must write _switch() because switch is a reserved keyword in ECMAScript. An error is emitted if the value for key does not match any of the cases and no default case is specified. Example 1 \u00b6 When an ETL uses multiple data sources, we can use a _switch() to run a dedicated sub-ETL for each data source. Suppose we have two tabular data sources: file.episodes and file.people . We can use the following _switch() statement to run different sub-ETLs: _switch(key.fileName, [file.episodes, etl_episodes], [file.people, etl_people], ), Example 2 \u00b6 When ETLs transform different kinds of entities, it can be useful to run a sub-ETL based on the type of entity. For example, if the current Etl Record represents a person, we want to assert their age. But if the current Etl Record represents a location, we want to assert its latitude and longitude: const etl_location = [ triple('iri', sdo.latitude, literal('lat', xsd.double)), triple('iri', sdo.longitude, literal('long', xsd.double)), ] const etl_person = [ triple('iri', sdo.age, literal('age', xsd.nonNegativeInteger)), ] etl.run( _switch('type', ['location', etl_location], ['person', etl_person], ), ) Skipping remaining functions ( skipRest() ) \u00b6 The skipRest() function allows us to stop the execution of any subsequent functions declared within the same code block where the skipRest() function is located. When you provide a key argument, skipRest() will skip over any following functions if the specified key is found in the record. Whenever there is no key argument specified, any functions after skipRest() will not be executed. Parameters \u00b6 key The optional key parameter value is compared against the keys in the record, if present in the record the remaining functions will not be executed. 
Example 1 : The following code snippet will stop executing any function after skipRest() , because no key is specified: fromJson([ { id: '123', first: 'John', last: 'Doe' }, { id: '456', first: 'Jane', last: 'Smith' }, ]), addIri({ content: 'id', key: '_id', prefix: prefix.person, }), triple('_id', foaf.lastName, 'last'), skipRest(), triple('_id', foaf.firstName, 'first') Since skipRest() is declared before triple('_id', foaf.firstName, 'first') , the following assertion is not made: triple('_id', foaf.firstName, 'first') Example 2 : whenForEach() with a specified key for skipRest() : fromJson( { Person: [ { firstName: 'John', lastName: 'Doe' }, { firstName: 'Tom', last: 'Smith' }, { firstName: 'Lisa', lastName: 'Kennedy' } ] } ), whenForEach(\"Person\", skipRest('last'), addIri({ content: 'firstName', key: '_firstName', prefix: prefix.person }), triple('_firstName', foaf.firstName, 'firstName'), triple('_firstName', foaf.lastName, 'lastName')), As a result, only the following triples will be asserted: person:John foaf:firstName \"John\"; foaf:lastName \"Doe\". person:Lisa foaf:firstName \"Lisa\"; foaf:lastName \"Kennedy\". Note that the record for \"Tom Smith\" was skipped, and no triples were asserted! This is because the key 'last' was present in that record, and due to the usage of skipRest('last') , all functions after skipRest() will not be executed.","title":"Control Structures"},{"location":"triply-etl/generic/control-structures/#control-structures","text":"This page documents how you can use control structures in your ETL configuration.","title":"Control Structures"},{"location":"triply-etl/generic/control-structures/#process-data-conditionally-when","text":"Source data often contains optional values. These are values that appear in some, but not all records. Source data often contains 'special' values that denote the absence of a value. Common examples are values such as 'NULL' or the empty string ( '' ) or 'clear' outliers such as 9999 for a missing year. We call such values \u2018null values\u2019. The when() function allows part of a TriplyETL configuration to run when certain conditions are met. The first parameter is used to determine whether or not the remaining parameters should be called: when('{condition}', '{statement-1}', '{statement-2}', '{statement-3}', // etc ), Notice that it is often useful to specify multiple statements under the same condition: The first statement transforms an optional value, and the second statement uses the transformed optional value to make a triple assertion. The first statement asserts one triple based on the optional value, and the second statement asserts a second triple based on the same optional value.","title":"Process data conditionally (when())"},{"location":"triply-etl/generic/control-structures/#missing-values","text":"If a value is sometimes completely missing from a source data record, the when() conditional function can be used. The following code snippet asserts a triple if and only if a value for the 'zipcode' key is present in the Record: when(context => context.isNotEmpty('zipcode'), triple(iri(prefix.id, 'id'), def.zipcode, 'zipcode'), ), Since checking for the presence or absence of a single key is very common, the above can also be written as follows: when('zipcode', triple(iri(prefix.id, 'id'), def.zipcode, 'zipcode'), ),","title":"Missing values"},{"location":"triply-etl/generic/control-structures/#the-empty-string","text":"In many source data formats, the empty string is used to signify a missing value; this particular string is treated in a special way by when() . 
A key whose value is the empty string is treated in the same way as a key that is altogether absent. The following code snippet will not print the record to standard output, because the 'zipcode' key is considered empty: fromJson([{ zipcode: '' }]), when('zipcode', logRecord(), ), Notice that it is almost never useful to store the empty string in linked data. So the treatment of empty strings as NULL values is the correct default behavior.","title":"The empty string"},{"location":"triply-etl/generic/control-structures/#null-values-when-and-whennotequal","text":"If a key contains specific values that are indended to represent NULL values, then these must be specifically identified the first when() parameter. The following code snippet identifies the value 9999 for the 'created' key as denoting a NULL values. This means that the year 9999 is used in the source system whenever the actual year of creation was unknown. when(context => context.getNumber('created') != 9999, triple(iri(prefix.id, 'id'), dct.created, literal('created', xsd.gYear)), ), Since checking the value of one specific key is very common, the above can be written as follows, using the more specific whenNotEqual function: whenNotEqual('created', 9999, triple(iri(prefix.id, 'id'), dct.created, literal('created', xsd.gYear)), ), Notice that the use of whenNotEqual() makes the configuration easier to read. The same shorthand notation works when there are multiple NULL values in the source data. The following code snippet only asserts a triple if the year of creation is neither 9999 nor -1. Notice that the array can contain any number of potential NULL values: whenNotEqual('created', [-1, 9999], triple(iri(prefix.id, 'id'), dct.created, literal('created', xsd.gYear)), ),","title":"NULL values (when() and whenNotEqual())"},{"location":"triply-etl/generic/control-structures/#iterating-over-lists-of-objects-foreach","text":"In the previous section, we saw that we were able to assert the name of the first country and the name of the second country. But what do we do if we want to assert the name for every country in the world? And what do we do if some countries have a name in 2 languages, but other countries have a name in 1 or 3 languages? What we need is a simple way to express that we want to make an assertion for every element in a list. TriplyETL provides the forEach() function for this purpose. The following code snippet asserts the name for each country in the example data: forEach('data.countries', triple(iri(prefix.id, 'id'), rdfs.label, 'name'), ), Notice the following details: - forEach() uses the path expression 'data.countries' to identify the list. - Inside the forEach() function, each element in the list is made available separately. - This allows the 'id' and 'name' keys to be identified directly. The above code snippet makes one assertion for every element in the \"countries\" list: country:nl rdfs:label 'The Netherlands'. country:de rdfs:label 'Germany'. Notice that forEach() only works for lists whose elements are objects*. See Iterating over lists of primitives for dealing with lists that do not contain objects. The elements that forEach() iterates over are themselves (sub)records. This implies that all functions that work for full records also work for the (sub)records inside forEach() . The (sub)records inside an forEach() function are smaller. This allows the regular keys of the iterated-over elements to be accessed directly. 
In addition to these regular keys, (sub)records inside forEach() also contain additional keys that simplify common operations. The following subsections explain the following special keys: Index key ( $index ) Parent key ( $parent ) Root key ( $root )","title":"Iterating over lists of objects (forEach())"},{"location":"triply-etl/generic/control-structures/#index-key-index","text":"Each (sub)record that is made available in forEach() contains the $index key. The value of this key is the index of the element in the list. This is the same index that is used to access specific elements in an list, as explained in the section on accessing lists by index . The index key is often useful for assigning a unique subject IRI to every element. Suppose we have the following source data. We do not want to use the values of the \"name\" key for our subject IRI, because these names contain spaces and possibly other problematic characters that make the IRI more difficult to read and use. { \"countries\": [ { \"name\": \"The Netherlands\" }, { \"name\": \"Germany\" }, { \"name\": \"Italy\" } ] } The following code snippet uses the $index key that is made available inside forEach in order to create a unique subject IRI for each country: forEach('countries', triple(iri(prefix.id, '$index'), rdfs.label, 'name'), ), This results in the following assertions: country:0 rdfs:label 'The Netherlands'. country:1 rdfs:label 'Germany'. country:2 rdfs:label 'Italy'.","title":"Index key ($index)"},{"location":"triply-etl/generic/control-structures/#parent-key-parent","text":"When forEach() iterates through a list of elements, it makes the enclosingparent* record available under key $parent . The parent record is the record that directly contains the first key that appears in the path that was specified in forEach() . For example, the parent record in the following call is the record that directly contains the \"data\" key: forEach('data.countries', // etc ), The $parent key can be observed when logRecord` is used to print the iterated-over elements to the terminal: forEach('data.countries', logRecord(), ), For our example source data, this emits the following 2 records: { \"id\": \"en\", \"name\": \"The Netherlands\", \"$index\": 0, \"$parent\": { \"data\": { \"labels\": [ { \"id\": \"en\", \"name\": \"The Netherlands\", }, { \"id\": \"de\" \"name\": \"Germany\", } ] } }, \"$root\": \"__circular__\" } and: { \"id\": \"de\", \"name\": \"Germany\", \"$index\": 1, \"$parent\": { \"data\": { \"labels\": [ { \"id\": \"en\", \"name\": \"The Netherlands\", }, { \"id\": \"de\" \"name\": \"Germany\", } ] } }, \"$root\": \"__circular__\" } The $root key is explained in the next section .","title":"Parent key ($parent)"},{"location":"triply-etl/generic/control-structures/#root-key-root","text":"Sometimes it may be necessary to access a part of the original record that is outside of the scope of the forEach() call. Every (sub)record inside a forEach() call contains the \"$root\" key. The value of the root key provides a link to the full record. Because the $root key is part of the linked-to record, it is not possible to print the value of the root key. (This would result in infinite output.) For this reason, the value of the $root key is printed as the special value \"__circular__\" . For the above examples, the parent record and root record are the same, but this is not always the case. Specifically, the parent record and root record are different when forEach() calls are nested. 
The following data contains an inner list (key \"labels\" ) inside an outer list ( \"countries\" ): { \"data\": { \"countries\": [ { \"id\": \"NL\", \"labels\": [ { \"name\": \"The Netherlands\", \"locale\": \"en-us\" }, { \"name\": \"Nederland\", \"locale\": \"nl-nl\" } ] }, { \"id\": \"EN\", \"labels\": [ { \"name\": \"England\", \"locale\": \"en-gb\" } ] } ] } } The following nested forEach() call shows the difference between the \"$parent\" key and the $root key. The $parent key allows the individual country objects to be accessed, while the \"$root\" key allows the entire tree to be accessed: forEach('data.countries', forEach('labels', logRecord(), ), ), The following record is printed first (3 records are printed in total). Notice that the value of the outer $parent and \"$root\" keys are now different: - The $parent key allows access to the first country. - The $root key allows access to the full record (describing multiple countries). { \"name\": \"The Netherlands\", \"locale\": \"en-us\", \"$index\": 0, \"$parent\": { \"id\": \"NL\", \"labels\": [ { \"name\": \"The Netherlands\", \"locale\": \"en-us\" }, { \"name\": \"Nederland\", \"locale\": \"nl-nl\" } ], \"$index\": 0, \"$parent\": { \"data\": { \"countries\": [ { \"id\": \"NL\", \"labels\": \"__circular__\" }, { \"id\": \"EN\", \"labels\": [ { \"name\": \"England\", \"locale\": \"en-gb\" } ] } ] } }, \"$root\": \"__circular__\" }, \"$root\": \"__circular__\" }","title":"Root key ($root)"},{"location":"triply-etl/generic/control-structures/#iterating-over-lists-of-primitives","text":"Notice that forEach() can only iterate over Records. Sometimes, it is necessary to iterate over primitive values, for example strings. If the iteration directly results in RDF assertions, iris() and literals() can be used. But in other cases, we must map the primitive values to objects. These objects can then be processed as regular Records. Here is an example: fromJson({value: 'a b c'}), split({content: 'value', separator: ' ', key: 'valueStrings'}), // Since we cannot transform 'valueStrings' directly, we first create // objects that contain those strings ('valueObjects'). custom.addFrom.value({ content: 'valueStrings', change: values => (values as string[]).map(value => ({'valueObject': value})), type: 'unknown', key: 'valueObjects' }), // Here we can apply any regular transformation that works with Records.","title":"Iterating over lists of primitives"},{"location":"triply-etl/generic/control-structures/#specify-multiple-conditions-ifelse","text":"The ifElse() function in TriplyETL allows us to specify multiple conditions based on which other functions are run. Every condition is specified with an if key. In case the condition is true, the functions specified in the then key are run. If none of the if conditions are true, the functions specified in an else key, if present, are run.","title":"Specify multiple conditions (ifElse())"},{"location":"triply-etl/generic/control-structures/#parameters","text":"The first parameter must be an { if: ..., then: ... } object. The non-first parameters are either additional { if: ..., then: ... } objects or a final { else: ... } object. Each if key specifies a condition that is either true or false. Conditions are either a key name or a function that takes the Etl Context and returns a Boolean value. 
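For instance, a function condition can compare a record value against a constant. In the following minimal sketch, the 'status' key and the def.Active class are hypothetical names that are only used for illustration: ifElse({ if: ctx => ctx.getString('status') === 'active', then: triple(iri(prefix.id, 'id'), a, def.Active), }),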
Specifying a key name is identical to specifying the following function: ctx => ctx.getString('KEY') The then and else keys take either one function, or an array of zero or more functions.","title":"Parameters"},{"location":"triply-etl/generic/control-structures/#example-1","text":"The following code snippet uses different conditions to determine the age category that a person belongs to: fromJson([ { id: 'johndoe', age: 12 }, { id: 'janedoe', age: 32 }, // ... ]), addIri({ prefix: prefix.person, content: 'id', key: '_person', }), ifElse({ if: ctx => ctx.getNumber('age') < 12, then: triple('_person', a, def.Child), }, { if: ctx => { const age = ctx.getNumber('age') return age >= 12 && age < 20 }, then: triple('_person', a, def.Teenager), }, { if: ctx => { const age = ctx.getNumber('age') return age >= 20 && age < 65 }, then: triple('_person', a, def.Adult), }, { else: triple('_person', a, def.Senior), }),","title":"Example 1"},{"location":"triply-etl/generic/control-structures/#example-2","text":"The following snippet either asserts data about persons or data about organizations, and uses an ifElse to make the conditional determination on which assertion to make: fromJson([ { first: 'John', last: 'Doe' }, { name: 'Triply' }, ]), ifElse({ if: 'name', then: pairs(iri(prefix.id, 'name'), [a, sdo.Organization], [sdo.name, 'name'], ), }, { else: [ concat({ content: ['first', 'last'], separator: '-', key: 'name', }), pairs(iri(prefix.id, 'name'), [a, sdo.Person], [sdo.givenName, 'first'], [sdo.familyName, 'last'], ), ], }),","title":"Example 2"},{"location":"triply-etl/generic/control-structures/#switch-between-different-cases-_switch","text":"The function _switch() allows us to switch between different cases, based on the value of a specified key. The signature is as follows; _switch(key, [value_1, functions_1], ..., [value_n, functions_n], default_functions, )","title":"Switch between different cases (_switch())"},{"location":"triply-etl/generic/control-structures/#parameters_1","text":"key The key parameter whose value is compared against the specified values. Each case consists of a list of two elements: value_i is the value that is checked for equivalence with the value stored in key . functions_i is the function or list of functions that is executed when the value in key is equivalent to value_i . default_functions is the function or list of functions that is executed when key matches neither of the cases. Notice that we must write _switch() because switch is a reserved keyword in ECMAScript. An error is emitted if the value for key does not match any of the cases and no default case is specified.","title":"Parameters"},{"location":"triply-etl/generic/control-structures/#example-1_1","text":"When an ETL uses multiple data sources, we can use a _switch() to run a dedicated sub-ETL for each data source. Suppose we have two tabular data sources: file.episodes and file.people . We can use the following _switch() statement to run different sub-ETLs: _switch(key.fileName, [file.episodes, etl_episodes], [file.people, etl_people], ),","title":"Example 1"},{"location":"triply-etl/generic/control-structures/#example-2_1","text":"When ETLs transform different kinds of entities, it can be useful to run a sub-ETL based on the type of entity. For example, if the current Etl Record represents a person, we want to assert their age. 
But if the current Etl Record represents a location, we want to assert its latitude and longitude: const etl_location = [ triple('iri', sdo.latitude, literal('lat', xsd.double)), triple('iri', sdo.longitude, literal('long', xsd.double)), ] const etl_person = [ triple('iri', sdo.age, literal('age', xsd.nonNegativeInteger)), ] etl.run( _switch('type', ['location', etl_location], ['person', etl_person], ), )","title":"Example 2"},{"location":"triply-etl/generic/control-structures/#skipping-remaining-functions-skiprest","text":"The skipRest() function allows us to stop the execution of any subsequent functions declared within the same code block where the skipRest() function is located. When you provide a key argument, skipRest() will skip over any following functions if the specified key is found in the record. When no key argument is specified, any functions after skipRest() will not be executed.","title":"Skipping remaining functions (skipRest())"},{"location":"triply-etl/generic/control-structures/#parameters_2","text":"key The optional key parameter is compared against the keys in the record; if it is present in the record, the remaining functions will not be executed. Example 1 : The following code snippet will stop executing any function after skipRest() , because no key is specified: fromJson([ { id: '123', first: 'John', last: 'Doe' }, { id: '456', first: 'Jane', last: 'Smith' }, ]), addIri({ content: 'id', key: '_id', prefix: prefix.person, }), triple('_id', foaf.lastName, 'last'), skipRest(), triple('_id', foaf.firstName, 'first') Since skipRest() is declared before triple('_id', foaf.firstName, 'first') , the following assertion is not made: triple('_id', foaf.firstName, 'first') Example 2 : whenForEach with a specified key for skipRest() : fromJson( { Person: [ { firstName: 'John', lastName: 'Doe' }, { firstName: 'Tom', last: 'Smith' }, { firstName: 'Lisa', lastName: 'Kennedy' } ] } ), whenForEach(\"Person\", skipRest('last'), addIri({ content: 'firstName', key: '_firstName', prefix: prefix.person }), triple('_firstName', foaf.firstName, 'firstName'), triple('_firstName', foaf.lastName, 'lastName')), As a result, only the following triples will be asserted: person:John foaf:firstName \"John\"; foaf:lastName \"Doe\". person:Lisa foaf:firstName \"Lisa\"; foaf:lastName \"Kennedy\". Note that the record for \"Tom Smith\" was skipped, and no triples were asserted! This is because the key 'last' was present in that record, and due to the usage of skipRest('last') , all functions after skipRest() were not executed.","title":"Parameters"},{"location":"triply-etl/generic/debug/","text":"On this page: Debug Overview Function logMemory() Function logQuads() Function logQuery() Function logRecord() Use when writing a new ETL Observe the effects of transformations Log a specific key Functions traceStart() and traceEnd() Debug \u00b6 TriplyETL includes functions that can be used during debugging. These debug functions allow you to inspect in detail how data flows through your pipeline. This allows you to find problems more quickly, and allows you to determine whether data is handled correctly by your TriplyETL configuration. Overview \u00b6 The following debug functions are available: Function Description logMemory() Prints the current memory consumption. logQuads() Prints the contents of the internal store to standard output. logQuery() Prints a query string to standard output. logRecord() Prints the record in its current state to standard output. traceEnd() Ends a trace of the record and internal store.
traceStart() Starts a trace of the record and internal store. These functions can be imported from the debug module: import { logMemory, logQuads, logQuery, logRecord, traceEnd, traceStart } from '@triplyetl/etl/debug' Function logMemory() \u00b6 This function prints information about the current memory consumption. It includes the following fields: Field name Meaning Use case CallCount The number of times that a specific use of logMemory() can been invoked. Find a location in your ETL script that is visited many times, e.g. because it occurs inside a (nested) loop. RecordId The numeric identifier of the record that is currently processed. Find a specific record that causes memory consumption to increase. Heap used The number of megabytes that are currently used on the heap. Find places in your ETL where an unexpected amount of memory is used. Heap total The number of megabytes that are currently allocated on the heap. Find places in your ETL where memory reallocation occurs. The following code snippet prints the memory consumption of TriplyETL for each record (first call), and for each member of key 'a' (second call): fromJson([{ a: [{ b: 1 }, { b: 2 }] }, { a: [] }, { a: [] }]), logMemory(), forEach('a', logMemory()), This prints the following messages to standard output: Info CallCount: 1 | RecordId: 1 | Heap (MB) used: 92 / total: 122 Info CallCount: 1 | RecordId: 1 | Heap (MB) used: 92 / total: 122 Info CallCount: 2 | RecordId: 1 | Heap (MB) used: 92 / total: 122 Info CallCount: 2 | RecordId: 2 | Heap (MB) used: 92 / total: 122 Info CallCount: 3 | RecordId: 2 | Heap (MB) used: 92 / total: 122 Function logQuads() \u00b6 This function prints the current contents of the internal store to standard output. The following snippet asserts one triple into the default graph of the internal store, and then prints the contents of the internal store: fromJson([{}]), triple(rdfs.Class, a, rdfs.Class), logQuads(), This results in the following output: @prefix rdf: . @prefix rdfs: . @prefix sdo: . { rdfs:Class a rdfs:Class } Function logQuery() \u00b6 This function prints a query string to standard output. This is specifically useful when the query string is stored in an external system, e.g. a SPARQL query string that is stored on a TriplyDB server: logQuery(Source.TriplyDb.query('my-account', 'my-query')), Depending on the query string that is stored in 'my-query' , this could result in the following output: select * { ?s ?p ?o. } limit 10 Function logRecord() \u00b6 This function prints the current state of the record to standard output. The record is a generic representation of the data that is extracted from one of the data sources (see the Record documentation page for more information). The following snippet prints the inline JSON record to standard output: fromJson([{ a: 1 }]), logRecord(), This emits the following: { \"a\": 1, \"$recordId\": 1, \"$environment\": \"Development\" } Use when writing a new ETL \u00b6 When writing a new ETL, logRecord() is often used as the first function to invoke immediately after extracting the record. For example: fromJson(Source.url('https://example.com/some/api/call')), logRecord(), Since this prints a full overview of what is available in the data source, this forms a good starting point for writing the rest of the ETL configurations. Observe the effects of transformations \u00b6 Another common use case for logRecord() is to observe the record at different moments in time. 
This is specifically useful to observe the effects of transformation functions , since these are the functions that modify the record. The following snippet logs the record directly before and directly after the transformation function split() is called. fromJson([{ a: '1, 2, 3' }]), logRecord(), split({ content: 'a', separator: ',', key: 'b' }), logRecord(), This makes it easy to observe the result of applying the transformation function: Running lib/main.js { \"a\": \"1, 2, 3\", \"$recordId\": 1, \"$environment\": \"Development\" } { \"a\": \"1, 2, 3\", \"$recordId\": 1, \"$environment\": \"Development\", \"b\": [ \"1\", \"2\", \"3\" ] } Log a specific key \u00b6 Since records can be quite long, in some cases it may be easier to print only a specific key. The following code snippet only prints the key that was added by the transformation function: fromJson([{ a: '1, 2, 3' }]), split({ content: 'a', separator: ',', key: 'b' }), logRecord({ key: 'b' }), This results in the following output: [ \"1\", \"2\", \"3\" ] Functions traceStart() and traceEnd() \u00b6 Sometimes you are interested to find one specific record based on a certain value of a key and/or to see the changes in this record made by specific middlewares. For these purposes, trace middleware can be used. Below, there is an example of how this middleware can be used: fromJson([ { a: 1, b: 1 }, // first dummy record { a: 2, b: 2 }, // second dummy record ]), change({key:'a', type:'number', change: (val) => val +100}), // change the 'a' key traceStart(), change({key:'b', type:'number', change: (val) => val +100}), // change the 'b' key traceEnd(), The result would be: \u250c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2510 \u2502 Record trace information \u2502 \u2502 { \u2502 \u2502 \"a\": 101, \u2502 \u2502 \"b\": 1 \u2502 \u2502 \"b\": 101 \u2502 \u2502 } \u2502 \u2502 \u2502 \u2514\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2518 \u250c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2510 \u2502 Quads trace information (unchanged) \u2502 \u2502 empty \u2502 \u2502 \u2502 \u2514\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2518 To rerun the traced middlewares for this record use the following command: > npx etl lib/{script-name} --trace .trace-1650542307095 In your terminal the line with \"b\": 1 will be red colored, showing the previous state of this key-value and the line with \"b\": 101 will be green colored, showing the new state. Also you can rerun the trace information for this specific record by running: npx etl lib/{script-name} --trace .trace-1650542307095","title":"Debug"},{"location":"triply-etl/generic/debug/#debug","text":"TriplyETL includes functions that can be used during debugging. These debug function allow you to inspect in a detailed way how data flows through your pipeline. 
This allows you to find problems more quickly, and allows you to determine whether data is handled correctly by your TriplyETL configuration.","title":"Debug"},{"location":"triply-etl/generic/debug/#overview","text":"The following debug function are available: Function Description logMemory() Prints the current memory consumption. logQuads() Prints the contents of the internal store to standard output. logQuery() Prints a query string to standard output. logRecord() Prints the record in its current state to standard output. traceEnd() Ends a trace of the record and internal store. traceStart() Starts a trace of the record and internal store. These functions can be imported from the debug module: import { logMemory, logQuads, logQuery, logRecord, traceEnd, traceStart } from '@triplyetl/etl/debug'","title":"Overview"},{"location":"triply-etl/generic/debug/#function-logmemory","text":"This function prints information about the current memory consumption. It includes the following fields: Field name Meaning Use case CallCount The number of times that a specific use of logMemory() can been invoked. Find a location in your ETL script that is visited many times, e.g. because it occurs inside a (nested) loop. RecordId The numeric identifier of the record that is currently processed. Find a specific record that causes memory consumption to increase. Heap used The number of megabytes that are currently used on the heap. Find places in your ETL where an unexpected amount of memory is used. Heap total The number of megabytes that are currently allocated on the heap. Find places in your ETL where memory reallocation occurs. The following code snippet prints the memory consumption of TriplyETL for each record (first call), and for each member of key 'a' (second call): fromJson([{ a: [{ b: 1 }, { b: 2 }] }, { a: [] }, { a: [] }]), logMemory(), forEach('a', logMemory()), This prints the following messages to standard output: Info CallCount: 1 | RecordId: 1 | Heap (MB) used: 92 / total: 122 Info CallCount: 1 | RecordId: 1 | Heap (MB) used: 92 / total: 122 Info CallCount: 2 | RecordId: 1 | Heap (MB) used: 92 / total: 122 Info CallCount: 2 | RecordId: 2 | Heap (MB) used: 92 / total: 122 Info CallCount: 3 | RecordId: 2 | Heap (MB) used: 92 / total: 122","title":"Function logMemory()"},{"location":"triply-etl/generic/debug/#function-logquads","text":"This function prints the current contents of the internal store to standard output. The following snippet asserts one triple into the default graph of the internal store, and then prints the contents of the internal store: fromJson([{}]), triple(rdfs.Class, a, rdfs.Class), logQuads(), This results in the following output: @prefix rdf: . @prefix rdfs: . @prefix sdo: . { rdfs:Class a rdfs:Class }","title":"Function logQuads()"},{"location":"triply-etl/generic/debug/#function-logquery","text":"This function prints a query string to standard output. This is specifically useful when the query string is stored in an external system, e.g. a SPARQL query string that is stored on a TriplyDB server: logQuery(Source.TriplyDb.query('my-account', 'my-query')), Depending on the query string that is stored in 'my-query' , this could result in the following output: select * { ?s ?p ?o. } limit 10","title":"Function logQuery()"},{"location":"triply-etl/generic/debug/#function-logrecord","text":"This function prints the current state of the record to standard output. 
The record is a generic representation of the data that is extracted from one of the data sources (see the Record documentation page for more information). The following snippet prints the inline JSON record to standard output: fromJson([{ a: 1 }]), logRecord(), This emits the following: { \"a\": 1, \"$recordId\": 1, \"$environment\": \"Development\" }","title":"Function logRecord()"},{"location":"triply-etl/generic/debug/#use-when-writing-a-new-etl","text":"When writing a new ETL, logRecord() is often used as the first function to invoke immediately after extracting the record. For example: fromJson(Source.url('https://example.com/some/api/call')), logRecord(), Since this prints a full overview of what is available in the data source, this forms a good starting point for writing the rest of the ETL configurations.","title":"Use when writing a new ETL"},{"location":"triply-etl/generic/debug/#observe-the-effects-of-transformations","text":"Another common use case for logRecord() is to observe the record at different moments in time. This is specifically useful to observe the effects of transformation functions , since these are the functions that modify the record. The following snippet logs the record directly before and directly after the transformation function split() is called. fromJson([{ a: '1, 2, 3' }]), logRecord(), split({ content: 'a', separator: ',', key: 'b' }), logRecord(), This makes it easy to observe the result of applying the transformation function: Running lib/main.js { \"a\": \"1, 2, 3\", \"$recordId\": 1, \"$environment\": \"Development\" } { \"a\": \"1, 2, 3\", \"$recordId\": 1, \"$environment\": \"Development\", \"b\": [ \"1\", \"2\", \"3\" ] }","title":"Observe the effects of transformations"},{"location":"triply-etl/generic/debug/#log-a-specific-key","text":"Since records can be quite long, in some cases it may be easier to print only a specific key. The following code snippet only prints the key that was added by the transformation function: fromJson([{ a: '1, 2, 3' }]), split({ content: 'a', separator: ',', key: 'b' }), logRecord({ key: 'b' }), This results in the following output: [ \"1\", \"2\", \"3\" ]","title":"Log a specific key"},{"location":"triply-etl/generic/debug/#functions-tracestart-and-traceend","text":"Sometimes you are interested to find one specific record based on a certain value of a key and/or to see the changes in this record made by specific middlewares. For these purposes, trace middleware can be used. 
Below, there is an example of how this middleware can be used: fromJson([ { a: 1, b: 1 }, // first dummy record { a: 2, b: 2 }, // second dummy record ]), change({key:'a', type:'number', change: (val) => val +100}), // change the 'a' key traceStart(), change({key:'b', type:'number', change: (val) => val +100}), // change the 'b' key traceEnd(), The result would be: \u250c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2510 \u2502 Record trace information \u2502 \u2502 { \u2502 \u2502 \"a\": 101, \u2502 \u2502 \"b\": 1 \u2502 \u2502 \"b\": 101 \u2502 \u2502 } \u2502 \u2502 \u2502 \u2514\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2518 \u250c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2510 \u2502 Quads trace information (unchanged) \u2502 \u2502 empty \u2502 \u2502 \u2502 \u2514\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2518 To rerun the traced middlewares for this record use the following command: > npx etl lib/{script-name} --trace .trace-1650542307095 In your terminal the line with \"b\": 1 will be red colored, showing the previous state of this key-value and the line with \"b\": 101 will be green colored, showing the new state. Also you can rerun the trace information for this specific record by running: npx etl lib/{script-name} --trace .trace-1650542307095","title":"Functions traceStart() and traceEnd()"},{"location":"triply-etl/generic/declarations/","text":"On this page: Declarations Introduction What is a declaration? Why use declarations? How to import declaration functionality? Base IRI declaration Prefix declarations Individual prefix declarations Example Prefix declaration tree / IRI strategy Example Term declarations Concept term declarations Vocabulary term declarations Shape term declarations Individual term declarations Graph name declarations External vocabularies Example: Using the PREMIS external vocabulary Language tag declarations Shorthand declarations The standard shorthand Example User-defined shorthands Example Geospatial declarations Declarations \u00b6 This page documents the declaration functionalities that are supported by TriplyETL. Introduction \u00b6 This section explains what declarations are, and what are the benefits of using them. What is a declaration? \u00b6 A declarations introduces a constant that can be (re)used throughout the rest of the ETL configuration. This is best shown through an example. The following code snippet asserts that John Doe is a person. It uses the following components that probably occur multiple times in the same ETL configuration: The namespace for this dataset is . The IRI for each person in the dataset starts with . The IRI is used by the external RDF vocabulary to relate instances to their class. The IRI is used by the external FOAF vocabulary to denote the set of all persons. 
triple( iri('https://triplydb.com/my-account/my-dataset/id/person/john-doe'), iri('http://www.w3.org/1999/02/22-rdf-syntax-ns#type'), iri('http://xmlns.com/foaf/0.1/Person') ), By using declarations, we can use constants to abbreviate the components that occur many times. This results in the following, identical assertion: triple(iri(prefix.person, str('john-doe')), a, foaf.Person), Why use declarations? \u00b6 The use of declarations has the following benefits: Readability : Shorter expressions are easier to read. See the example from the previous subsection for an example. Modularity : By putting all declarations in one place, it is easy to include them into multiple places in the ETL configuration. Maintenance : A configuration change that is made in a declaration immediately becomes available to all the locations in which it is used. This makes it easy to update things like namespaces, that would be cumbersome and error-prone to change in each location of use. Editor support : Declarations support auto-complete functionality in text editors. When documentation is included, this is shown alongside the declarations in text editors. How to import declaration functionality? \u00b6 Declaration objects are found in the following modules: import { Iri } from '@triplyetl/etl/generic' import { sdo, owl, sh, skos } from '@triplyetl/vocabularies' Base IRI declaration \u00b6 Within one dataset, it is common for the majority of IRIs to share the same IRI prefix or 'namespace'. It is convenient to declare this shared IRI prefix or namespace once, and use it throughout the rest of the ETL. This most broadly shared IRI prefix is called the base IRI . In TriplyETL, the base IRI is declared in the following way: const baseIri = Iri('https://triplydb.com/triply/iris/') Notice that the base IRI that is declared above, is also the location where the dataset that uses this base IRI can be found: https://triplydb.com/triply/iris/ . This is the dereferencing principle of linked data: IRIs are used for both naming \u00e1nd locating data. Prefix declarations \u00b6 Linked data uses IRIs for uniquely identifying most data items. Since IRIs can be long and complex, it is common to declare shorter aliases that can be used to abbreviate them. Such aliases are introduced in prefix declarations . Individual prefix declarations \u00b6 A new prefix declaration is created by using the concat() member function on an existing IRI object. The concat() function specifies the string that is added to the existing IRI. The added string must meet the syntactic criteria for the path segment component in IRI syntax (see RFC 3987 for the official syntax). It is common practice to end the added string with a forward slash, which ends a path segment in IRI syntax. Prefix declarations are often based off of the base IRI , since that is the prefix IRI that is shared by most IRIs in a dataset. Example \u00b6 The following code snippet declares a base IRI, and then adds the following two prefix declarations: Alias prefix_id abbreviates IRI , which is used by all IRIs that denote instances. Alias prefix_model abbreviates IRI , which is used by all IRIs that are used in the data model. import { Iri } from '@triplyetl/etl/generic' const baseIri = Iri('https://triplydb.com/my-account/my-dataset/') const prefix_id = baseIri.concat('id/') const prefix_model = baseIri.concat('model/') Prefix declaration tree / IRI strategy \u00b6 It is common to declare the base IRI declaration and all prefix declarations in one single spot. 
This consolidates the full IRI strategy for a dataset in one place. This is easy for documentation purposes, since all project members can see the full set of IRI prefixes in one place. And this supports optimal reuse of these declarations throughout the ETL configuration. IRI prefixes form a tree : The root of the tree is the base IRI. The internal nodes of the tree are prefix declarations that are extended by some other prefix declaration. The external nodes or leaves of the tree are prefix declarations that are not extended by another prefix declaration. It is common to declare the leaves of the IRI prefix tree in an object, since such an object can be conveniently used to make term assertions throughout the ETL. Example \u00b6 The following code snippet gives an example of such an IRI prefix tree, where: The base IRI is https://triplydb.com/my-account/my-dataset/ . The internal nodes are the base IRI, prefix_id , and prefix_model . The leaves are the three prefix declarations that appear in the prefix object. import { Iri } from '@triplyetl/etl/generic' const baseIri = Iri('https://triplydb.com/my-account/my-dataset/') const prefix_id = baseIri.concat('id/') const prefix_model = baseIri.concat('model/') const prefix = { city: prefix_id.concat('city/'), def: prefix_model.concat('def/'), person: prefix_id.concat('person/'), } With the above declarations in place, the following IRI term assertions can be made (see the iri() function for more information): iri(prefix.city, 'name') iri(prefix.city, str('Amsterdam')), iri(prefix.def, str('livesIn')), iri(prefix.person, 'first name') iri(prefix.person, str('John')), Static terms can also be expressed with the concat() member function: prefix.city.concat('Amsterdam') prefix.def.concat('livesIn') prefix.person.concat('John') The following statement assertion can be made (see the triple() function for more information). Notice that it is possible to mix (dynamic and static) iri() term assertions with IRIs created with concat() : triple( iri(prefix.person, 'first name'), iri(prefix.def, str('livesIn')), prefix.city.concat('Amsterdam') ), The statement assertion results in the following linked data: <https://triplydb.com/my-account/my-dataset/id/person/John> <https://triplydb.com/my-account/my-dataset/model/def/livesIn> <https://triplydb.com/my-account/my-dataset/id/city/Amsterdam>. Term declarations \u00b6 When a term is used in multiple places in the ETL configuration, it is often better to declare it first and (re)use it later. This ensures that changes to the term are applied in every location of use. We will use the following prefix declaration tree in our examples: import { Iri } from '@triplyetl/etl/generic' const baseIri = Iri('https://triplydb.com/my-account/my-dataset/') const prefix_id = baseIri.concat('id/') const prefix_model = baseIri.concat('model/') const prefix = { city: prefix_id.concat('city/'), con: prefix_model.concat('con/'), def: prefix_model.concat('def/'), graph: prefix_id.concat('graph/'), person: prefix_id.concat('person/'), shp: prefix_model.concat('shp/'), } Concept term declarations \u00b6 Concepts are expressed in linked data with SKOS. Concepts are often (re)used in multiple places, and they often form a fixed collection. This makes terms that denote concepts eligible for a term declaration object. The following code snippet declares the terms that denote concepts: const concept = { animal: prefix.con.concat('animal'), mammal: prefix.con.concat('mammal'), } This object can be used throughout the ETL configuration. For example, in the following statement assertion: triple(concept.mammal, skos.broader, concept.animal), Vocabulary term declarations \u00b6 Classes and properties are expressed in linked data with RDFS/OWL.
Classes and properties are often (re)used in multiple places, and they often form a fixed vocabulary. This makes terms that denote classes or properties eligible for a term declaration object. The following code snippet declares the terms that denote classes and properties: const def = { City: prefix.def.concat('City'), Person: prefix.def.concat('Person'), livesIn: prefix.def.concat('livesIn'), } Vocabulary term declarations can be used in statement assertions, for example: triple(iri(prefix.city, 'name'), a, def.City), pairs(iri(prefix.person, 'first name'), [a, def.Person], [def.livesIn, iri(prefix.city, 'name')], ), This results in the following linked data: city:Amsterdam a def:City. person:John a def:Person; def:livesIn city:Amsterdam. Or diagrammatically: graph LR john -- a --> Person john -- def:livesIn --> amsterdam Person[def:Person]:::model amsterdam[city:Amsterdam]:::data john[person:John]:::data classDef data fill:yellow classDef model fill:lightblue Shape term declarations \u00b6 Shapes are expressed in linked data with SHACL. Shapes are often (re)used in multiple places, and they often form a fixed vocabulary. This makes terms that denote shapes eligible for a term declaration object. The following code snippet declares the terms that denote shapes: const shp = { City: prefix.shp.concat('City'), Person: prefix.shp.concat('Person'), Person_livesIn: prefix.shp.concat('livesIn'), } This object can be used throughout the ETL configuration. For example, in the following statement assertions: pairs(shp.Person, [a, sh.NodeShape], [sh.property, shp.Person_livesIn], [sh.targetClass, def.Person], ), pairs(shp.Person_livesIn, [a, sh.PropertyShape], [sh.class, def.City], [sh.path, def.livesIn], ), This results in the following linked data: shp:Person a sh:NodeShape; sh:property shp:Person_livesIn; sh:targetClass def:Person. shp:Person_livesIn a sh:PropertyShape; sh:class def:City; sh:path def:livesIn. Or diagrammatically: graph LR shp_Person -- a --> sh:NodeShape shp_Person -- sh:property --> shp_Person_livesIn shp_Person -- sh:targetClass --> def_Person shp_Person_livesIn -- a --> sh:PropertyShape shp_Person_livesIn -- sh:class --> def_City shp_Person_livesIn -- sh:path --> def_livesIn def_City[def:City]:::model def_Person[def:Person]:::model def_livesIn[def:livesIn]:::model shp_Person[shp:Person]:::shape shp_Person_livesIn[shp:Person_livesIn]:::shape classDef model fill:lightblue classDef shape fill:orange Individual term declarations \u00b6 Individuals are instances of classes. For example, John Doe is an individual of class def:Person ; Amsterdam is an individual of class def:City . If terms that denote individuals are used multiple times in an ETL configuration, term declarations may be introduced for them. The following code snippet declares the terms that denote individual persons: const person = { jane: prefix.person.concat('Jane'), john: prefix.person.concat('John'), mary: prefix.person.concat('Mary'), } Instance term declarations can be used in statement assertions, for example: triple(person.john, foaf.knows, person.mary), This results in the following linked data: person:John foaf:knows person:Mary. Graph name declarations \u00b6 Linked data statements belong to graphs. Graphs are denoted by graph names. For example, a graph name may denote a graph that contains metadata statements, while another graph name may denote a graph that contains instance data. If graph names are used multiple times in an ETL configuration, term declarations may be introduced for them.
The following code snippet declares three graph names: const graph = { metadata: prefix.graph.concat('metadata'), model: prefix.graph.concat('model'), instances: prefix.graph.concat('instances'), } The declared graph names can now be used in statement assertions: triples(graph.metadata, ['_dataset', a, dcat.Dataset], ['_dataset', rdfs.label, str('My Dataset')], ), External vocabularies \u00b6 In linked data, it is common to reuse existing vocabularies. Popular vocabularies can be imported from the TriplyETL vocabulary library. See the table of currently supported vocabularies for a full overview. The following example imports three vocabularies (FOAF, OWL, PREMIS): import { foaf, owl, premis } from '@triplyetl/vocabularies' This allows you to make the following statement assertion: triple(foaf.Person, a, owl.Class), Notice that the notation in TriplyETL comes very close to the notation in the standardized linked data syntax for Turtle, TriG, and SPARQL. For the example above: foaf:Person a owl:Class. Example: Using the PREMIS external vocabulary \u00b6 The following code snippet uses the external PREMIS vocabulary. This vocabulary is published by the Library of Congress and is used to publish metadata about the preservation of digital objects. The following code snippet asserts that a PREMIS file is stored in a PREMIS storage location: pairs(iri(prefix.file, 'File ID'), [a, premis.File], [premis.storedAt, iri(prefix.location, 'Location ID')], ), triple(iri(prefix.location, 'Location ID'), a, premis.StorageLocation), Language tag declarations \u00b6 Linked data includes support for language-tagged strings . These are literals that specify a string value and a code that denotes the natural language in which that string value should be interpreted. The natural language tags follow a syntax that is standardized in RFC 5646 , and must occur in the Language Subtag Registry that is maintained by IANA. TriplyETL includes declarations for these natural language tags. They can be imported as follows: import { language } from '@triplyetl/vocabularies' Language tag declarations can be used in literal() term assertions: literal(str('Nederland'), language.nl) literal(str('Netherlands'), language.en) Language tag declarations can also be used in addLiteral() transformations: addLiteral({ content: 'label', languageTag: language.fr, key: '_languageTaggedString', }), Shorthand declarations \u00b6 Shorthands are convenient names that stand for commonly used IRIs. There is one standard shorthand ( a ), and TriplyETL allows other shorthands to be declared as needed. The standard shorthand \u00b6 The standardized linked data syntax for Turtle, TriG, and SPARQL allows the shorthand a to be used to stand for the rdf:type property. TriplyETL supports this standard shorthand, which can be imported from the vocabulary library: import { a } from '@triplyetl/vocabularies' In the standardized linked data syntax for Turtle, TriG and SPARQL, this shorthand can only be used in the predicate position. This restriction is not enforced in TriplyETL, where the a shorthand can be used in the subject, predicate, object, and even graph position. Example \u00b6 The following code snippet makes a true statement assertion, while using the standard shorthand twice: triple(a, a, rdf.Property), This results in the following linked data: rdf:type a rdf:Property. User-defined shorthands \u00b6 TriplyETL allows the introduction of arbitrary, user-defined shorthands.
User-defined shorthands can make linked data assertions in the ETL configuration more readable for users from certain domains. For example, \"is a\" is a commonly used phrase in many modeling languages to denote the subsumption relation. Example \u00b6 The following code snippet declares is_a as a user-defined shorthand for the rdfs:subClassOf property (which is the subsumption relation in linked data): import { foaf, rdfs } from '@triplyetl/vocabularies' const is_a = rdfs.subClassOf This declaration is used in the following statement assertion: triple(foaf.Person, is_a, foaf.Agent), This results in the following linked data: foaf:Person rdfs:subClassOf foaf:Agent. Geospatial declarations \u00b6 TriplyETL includes declarations for geospatial coordinate reference systems. These are identified by EPSG codes, and can imported as follows: import { epsg } from '@triplyetl/vocabularies' EPSG codes can be used in geospatial transformation functions like geojsonToWkt() : geojsonToWkt({ content: 'geojson', crs: epsg[28992], key: '_wkt', }),","title":"Declarations"},{"location":"triply-etl/generic/declarations/#declarations","text":"This page documents the declaration functionalities that are supported by TriplyETL.","title":"Declarations"},{"location":"triply-etl/generic/declarations/#introduction","text":"This section explains what declarations are, and what are the benefits of using them.","title":"Introduction"},{"location":"triply-etl/generic/declarations/#what-is-a-declaration","text":"A declarations introduces a constant that can be (re)used throughout the rest of the ETL configuration. This is best shown through an example. The following code snippet asserts that John Doe is a person. It uses the following components that probably occur multiple times in the same ETL configuration: The namespace for this dataset is . The IRI for each person in the dataset starts with . The IRI is used by the external RDF vocabulary to relate instances to their class. The IRI is used by the external FOAF vocabulary to denote the set of all persons. triple( iri('https://triplydb.com/my-account/my-dataset/id/person/john-doe'), iri('http://www.w3.org/1999/02/22-rdf-syntax-ns#type'), iri('http://xmlns.com/foaf/0.1/Person') ), By using declarations, we can use constants to abbreviate the components that occur many times. This results in the following, identical assertion: triple(iri(prefix.person, str('john-doe')), a, foaf.Person),","title":"What is a declaration?"},{"location":"triply-etl/generic/declarations/#why-use-declarations","text":"The use of declarations has the following benefits: Readability : Shorter expressions are easier to read. See the example from the previous subsection for an example. Modularity : By putting all declarations in one place, it is easy to include them into multiple places in the ETL configuration. Maintenance : A configuration change that is made in a declaration immediately becomes available to all the locations in which it is used. This makes it easy to update things like namespaces, that would be cumbersome and error-prone to change in each location of use. Editor support : Declarations support auto-complete functionality in text editors. 
When documentation is included, this is shown alongside the declarations in text editors.","title":"Why use declarations?"},{"location":"triply-etl/generic/declarations/#how-to-import-declaration-functionality","text":"Declaration objects are found in the following modules: import { Iri } from '@triplyetl/etl/generic' import { sdo, owl, sh, skos } from '@triplyetl/vocabularies'","title":"How to import declaration functionality?"},{"location":"triply-etl/generic/declarations/#base-iri-declaration","text":"Within one dataset, it is common for the majority of IRIs to share the same IRI prefix or 'namespace'. It is convenient to declare this shared IRI prefix or namespace once, and use it throughout the rest of the ETL. This most broadly shared IRI prefix is called the base IRI . In TriplyETL, the base IRI is declared in the following way: const baseIri = Iri('https://triplydb.com/triply/iris/') Notice that the base IRI that is declared above, is also the location where the dataset that uses this base IRI can be found: https://triplydb.com/triply/iris/ . This is the dereferencing principle of linked data: IRIs are used for both naming \u00e1nd locating data.","title":"Base IRI declaration"},{"location":"triply-etl/generic/declarations/#prefix-declarations","text":"Linked data uses IRIs for uniquely identifying most data items. Since IRIs can be long and complex, it is common to declare shorter aliases that can be used to abbreviate them. Such aliases are introduced in prefix declarations .","title":"Prefix declarations"},{"location":"triply-etl/generic/declarations/#individual-prefix-declarations","text":"A new prefix declaration is created by using the concat() member function on an existing IRI object. The concat() function specifies the string that is added to the existing IRI. The added string must meet the syntactic criteria for the path segment component in IRI syntax (see RFC 3987 for the official syntax). It is common practice to end the added string with a forward slash, which ends a path segment in IRI syntax. Prefix declarations are often based off of the base IRI , since that is the prefix IRI that is shared by most IRIs in a dataset.","title":"Individual prefix declarations"},{"location":"triply-etl/generic/declarations/#example","text":"The following code snippet declares a base IRI, and then adds the following two prefix declarations: Alias prefix_id abbreviates IRI , which is used by all IRIs that denote instances. Alias prefix_model abbreviates IRI , which is used by all IRIs that are used in the data model. import { Iri } from '@triplyetl/etl/generic' const baseIri = Iri('https://triplydb.com/my-account/my-dataset/') const prefix_id = baseIri.concat('id/') const prefix_model = baseIri.concat('model/')","title":"Example"},{"location":"triply-etl/generic/declarations/#prefix-declaration-tree-iri-strategy","text":"It is common to declare the base IRI declaration and all prefix declarations in one single spot. This consolidates the full IRI strategy for a dataset in one place. This is easy for documentation purposes, since all project members can see the full set of IRI prefixes in one place. And this supports optimal reuse of these declarations throughout the ETL configuration. IRI prefixes form a tree : The root of the tree is the base IRI. The internal nodes of the tree are prefix declarations that are extended by some other prefix declaration. The external nodes or leaves of the tree are prefix declarations that are not extended by another prefix declaration. 
It is common to declare the leaves of the IRI prefix tree in an object, since such an object can be conveniently used to make term assertions throughout the ETL.","title":"Prefix declaration tree / IRI strategy"},{"location":"triply-etl/generic/declarations/#example_1","text":"The following code snippet gives an example of such an IRI prefix tree, where: The base IRI is . The internal nodes are the base IRI, prefix_id , and prefix_model . The leaves are the three prefix declarations that appear in the prefix object. import { Iri } from '@triplyetl/etl/generic' const baseIri = Iri('https://triplydb.com/my-account/my-dataset/') const prefix_id = baseIri.concat('id/') const prefix_model = baseIri.concat('model/') const prefix = { city: prefix_id.concat('city/'), def: prefix_model.concat('def/'), person: prefix_id.concat('person/'), } With the above declarations in place, the following IRI term assertions can be made (see the iri() function for more information): iri(prefix.city, 'name') iri(prefix.city, str('Amsterdam')), iri(prefix.def, str('livesIn')), iri(prefix.person, 'first name') iri(prefix.person, str('John')), Static terms can also be expressed with the concat() member function: prefix.city.concat('Amsterdam') prefix.def.concat('livesIn') prefix.person.concat('John') The following statement assertion can be made (see the triple() function for more information). Notice that it is possible to mix (dynamic and static) iri() term assertions with IRIs created with concat() : triple( iri(prefix.person, 'first name'), iri(prefix.def, str('livesIn')), prefix.city.concat('Amsterdam') ), The statement assertion results in the following linked data: .","title":"Example"},{"location":"triply-etl/generic/declarations/#term-declarations","text":"When a term is used in multiple places in the ETL configuration, it is often better to declare it first and (re)use it later. This ensures that changes to the term are applied in every location of use. We will use the following prefix declaration tree in our examples: import { Iri } from '@triplyetl/etl/generic' const baseIri = Iri('https://triplydb.com/my-account/my-dataset/') const prefix_id = baseIri.concat('id/') const prefix_model = baseIri.concat('model/') const prefix = { city: prefix_id.concat('city/'), con: prefix_model.concat('con/'), def: prefix_model.concat('def/'), graph: prefix_id.concat('graph/')), person: prefix_id.concat('person/'), shp: prefix_model.concat('shp/'), }","title":"Term declarations"},{"location":"triply-etl/generic/declarations/#concept-term-declarations","text":"Concepts are expressed in linked data with SKOS. Concepts are often (re)used in multiple places, and they often form a fixed collection. This makes terms that denote concepts eligible for a term declaration object. The following code snippet declares the terms that denote concepts: const concept = { animal: prefix.con.concat('animal'), mammal: prefix.con.concat('mammal'), } This object can be used through the ETL configuration. For example in the following statement assertion: triple(concept.mammal, skos.broader, concept.animal),","title":"Concept term declarations"},{"location":"triply-etl/generic/declarations/#vocabulary-term-declarations","text":"Classes and properties are expressed in linked data with RDFS/OWL. Classes and properties are often (re)used in multiple places, and they often form a fixed vocabulary. This makes terms that denote classes or properties eligible for a term declaration object. 
The following code snippet declares the terms that denote classes and properties: const def = { City: prefix.def.concat('City'), Person: prefix.def.concat('Person'), livesIn: prefix.def.concat('livesIn'), } Vocabulary term declarations can be used in statement assertions, for example: triple(iri(prefix.city, 'name'), a, def.City), pairs(iri(prefix.person, 'first name'), [a, def.Person], [def.livesIn, iri(prefix.city, 'name')], ), This results in the following linked data: city:Amsterdam a def:City. person:John a def:Person; def:livesIn city:Amsterdam. Or diagrammatically: graph LR john -- a --> Person john -- def:livesIn --> amsterdam Person[def:Person]:::model amsterdam[city:Amsterdam]:::data john[person:John]:::data classDef data fill:yellow classDef model fill:lightblue","title":"Vocabulary term declarations"},{"location":"triply-etl/generic/declarations/#shape-term-declarations","text":"Shapes are expressed in linked data with SHACL. shapes are often (re)used in multiple places, and they often form a fixed vocabulary. This makes terms that denote shapes eligible for a term declaration object. The following code snippet declares the terms that denote shapes: const shp = { City: prefix.shp.concat('City'), Person: prefix.shp.concat('Person'), Person_livesIn: prefix.shp.concat('livesIn'), } This object can be used through the ETL configuration. For example in the following statement assertions: pairs(shp.Person, [a, sh.NodeShape], [sh.property, shp.Person_livesIn], [sh.targetClass, def.Person], ), pairs(shp.Person_livesIn, [a, sh.PropertyShape], [sh.class, def.City], [sh.path, def:livesIn], ), This results in the following linked data: shp:Person a sh:NodeShape; sh:property shp:Person_livesIn; sh:targetClass def:Person. shp:Person_livesIn a sh:PropertyShape; sh:class def:City; sh:path def:livesIn. Or diagrammatically: graph LR shp_Person -- a --> sh:NodeShape shp_Person -- sh:property --> shp_Person_livesIn shp_Person -- sh:targetClass --> def_Person shp_Person_livesIn -- a --> sh:PropertyShape shp_Person_livesIn -- sh:class --> def_City shp_Person_livesIn -- sh:path --> def_livesIn def_City[def:City]:::model def_Person[def:Person]:::model def_livesIn[def:livesIn]:::model shp_Person[shp:Person]:::shape shp_Person_livesIn[shp:Person_livesIn]:::shape classDef model fill:lightblue classDef shape fill:orange","title":"Shape term declarations"},{"location":"triply-etl/generic/declarations/#individual-term-declarations","text":"Individuals are instances of classes. For example, John Doe is an individual of class def:Person ; Amsterdam is an individual of class def:City . If terms that denote individuals are used multiple times in an ETL configuration, term declarations may be introduced for them. The following code snippet declares the terms that denote individual persons: const person = { jane: prefix.person('Jane'), john: prefix.person('John'), mary: prefix.person('Mary'), } Instance term declarations can be used in statement assertions, for example: triple(person.john, foaf.knows, person.mary), This results in the following linked data: person:john foaf:knows person:mary.","title":"Individual term declarations"},{"location":"triply-etl/generic/declarations/#graph-name-declarations","text":"Linked data statements belong to graphs. Graphs are denoted by graph names. For example, a graph name may denote a graph that contains metadata statements, while another graph name may denote a graph that contains instance data. 
If graph names are used multiple times in an ETL configuration, term declarations may be introduced for them. The following code snippet declares three graph names: const graph = { metadata: prefix.graph.concat('metadata'), model: prefix.graph.concat('model'), instances: prefix.graph.concat('instances'), } The declared graph names can now be used in statement assertions: triples(graph.metadata, ['_dataset', a, dcat.Dataset], ['_dataset', rdfs.label, str('My Dataset')], ),","title":"Graph name declarations"},{"location":"triply-etl/generic/declarations/#external-vocabularies","text":"In linked data, it is common to reuse existing vocabularies. Popular vocabularies can be imported from the TriplyETL vocabulary library. See the table of currently supported vocabularies for a full overview. The following example imports three vocabularies (FOAF, OWL, PREMIS): import { foaf, owl, premis } from '@triplyetl/vocabularies' This allows you to make the following statement assertion: triple(foaf.Person, a, owl.Class), Notice that the notation in TriplyETL comes very close to the notation in the standardized linked data syntax for Turtle, TriG, and SPARQL. For the example above: foaf:Person a owl:Class.","title":"External vocabularies"},{"location":"triply-etl/generic/declarations/#example-using-the-premis-external-vocabulary","text":"The following code snippet uses the external PREMIS vocabulary. This vocabulary is published by the Library of Congress and is used to publish metadata about the preservation of digital objects. The following code snippet asserts that a PREMIS file is stored in a PREMIS storage location: pairs(iri(prefix.file, 'File ID'), [a, premis.File], [premis.storedAt, iri(prefix.location, 'Location ID')], ), triple(iri(prefix.location, 'Location ID'), a, premis.StorageLocation),","title":"Example: Using the PREMIS external vocabulary"},{"location":"triply-etl/generic/declarations/#language-tag-declarations","text":"Linked data includes support for language-tagged strings . These are literals that specify a string value and a code that denotes the natural language in which that string value should be interpreted. The natural language tags follow a syntax that is standardized in RFC 5646 , and must occur in the Language Subtag Registry that is maintained by IANA. TriplyETL includes declarations for these natural language tags. They can be imported as follows: import { language } from '@triplyetl/vocabularies' Language tag declaration can be used in literal() term assertions: literal(str('Nederland'), language.nl) literal(str('Netherlangs'), language.en) Language tag declarations can also be used in addLiteral() transformations: addLiteral({ content: 'label', languageTag: lang.fr, key: '_languageTaggedString', }),","title":"Language tag declarations"},{"location":"triply-etl/generic/declarations/#shorthand-declarations","text":"Shorthands are convenient names that stand for commonly used IRIs. There is one standard shorthand ( a ), and TriplyETL allows other shorthands to be declared as needed.","title":"Shorthand declarations"},{"location":"triply-etl/generic/declarations/#the-standard-shorthand","text":"The standardized linked data syntax for Turtle, TriG, and SPARQL allow the shorthand a to be used to stand for the rdf:type property. 
TriplyETL supports this standard shorthand, which can be imported from the vocabulary library: import { a } from '@triplyetl/vocabularies' In the standardized linked data syntax for Turtle, TriG and SPARQL, this shorthand can only be used in the predicate position. This restriction is not enforced in TriplyETL, where the a shorthand can be used in the subject, predicate, object, and even graph position.","title":"The standard shorthand"},{"location":"triply-etl/generic/declarations/#example_2","text":"The following code snippet makes a true statement assertion, while using the stands shorthand twice: triple(a, a, rdf.Property), This results in the following linked data: rdf:type a rdf:Property.","title":"Example"},{"location":"triply-etl/generic/declarations/#user-defined-shorthands","text":"TriplyETL allows the introduction of arbitrary, user-defined shorthands. User-defined shorthands can make linked data assertions in the ETL configuration more readable for users from certain domains. For example, \"is a\" is a commonly used phrase in many modeling languages to denote the subsumption relation.","title":"User-defined shorthands"},{"location":"triply-etl/generic/declarations/#example_3","text":"The following code snippet declares is_a as a user-defined shorthand for the rdfs:subClassOf property (which is the subsumption relation in linked data): import { foaf, rdfs } from '@triplyetl/vocabularies' const is_a = rdfs.subClassOf This declaration is used in the following statement assertion: triple(foaf.Person, is_a, foaf.Agent), This results in the following linked data: foaf:Person rdfs:subClassOf foaf:Agent.","title":"Example"},{"location":"triply-etl/generic/declarations/#geospatial-declarations","text":"TriplyETL includes declarations for geospatial coordinate reference systems. These are identified by EPSG codes, and can imported as follows: import { epsg } from '@triplyetl/vocabularies' EPSG codes can be used in geospatial transformation functions like geojsonToWkt() : geojsonToWkt({ content: 'geojson', crs: epsg[28992], key: '_wkt', }),","title":"Geospatial declarations"},{"location":"triply-etl/generic/getting-started/","text":"On this page: TriplyETL Getting Started Prerequisites Minimum versions Update the prerequisites TriplyETL Generator TriplyETL Runner TriplyETL Library TriplyETL Getting Started \u00b6 This page helps you to get started with TriplyETL. You can get started with TriplyETL in any of the following ways: TriplyETL Generator creates a new ETL project based on your answers to a set of question. TriplyETL Runner runs an existing ETL project. TriplyETL Library can be included as a dependency in your TypeScript project. Prerequisites \u00b6 In order to use TriplyETL, you must first install the following programs on your computer: Install Git Go to this link and follow the instructions for your operating system (Windows, macOS, or Linux). Run the following commands to set your user name and email in Git: git config --global user.email \"ada@triply.cc\" git config --global user.name \"Ada Lovelace\" This information will be used in the Git version history for the TriplyETL project. This allows your team to keep track of who made which change. Install Node.js (simple approach) Go to nodejs.org and click on option \u201c18.x.y LTS (Recommended For Most Users)\u201d. This will download the installer for your operating system. Run the installer on your computer. On Windows, you must also select the number of bits on your computer: 32 or 64. 
The correct number of bits is 64 for almost all Windows computers. Install Node.js (advanced approach) For more advanced us, you can install the Node Version Manager ( nvm ). This allows you to install multiple versions of Node.js on the same computer. See the following links for more information: On Windows You can following the official instructions from Microsoft for installing NVM on Windows. On macOS or Linux You can follow the instructions for installing NVM on any operating system (including macOS and Linux). Find a terminal application You must use a terminal application in order to run commands from the TriplyETL CLI . Here are some examples of terminal applications on different operating systems: On Windows Most Windows versions come with some version of PowerShell preinstalled. You can also follow these instructions by Microsoft to update to the latest version of PowerShell. On macOS Most macOS version come with a Terminal application preinstalled. On Linux Most Linux versions come with a preinstalled terminal application. For example, on Ubuntu the GNOME Terminal application is preinstalled. Minimum versions \u00b6 TriplyETL requires the following minimum versions for the prerequisites: NPM v10.2.1 Node.js v18 Update the prerequisites \u00b6 Update NPM If you have NPM installed, you can run npm -v to see your current version. If you want to upgrade to a different version (for example 10.2.1), you can run the following command: npm install -g npm@10.2.1 For more information see NPM's \"try latest stable version of npm\" documentation. Update Node.js If you have Node.js installed, you can run node -v to see your current version. If you want to upgrade to a different version you need to use a Node package manager (e.g. nvm , n etc.). For more information visit Installing Node.js via package manager . Update Git If you have Git installed, you can run git -v to see your current version, if you want to upgrade to latest version you can: Linux Run command sudo apt-get update && sudo apt-get install git Windows What you need to do depends on your current Git version: Older than 2.14.1 Uninstall Git from your system and reinstall Git for Windows . Between 2.14.2 and 2.16.1 Run command git update Greater than or equal to 2.16.1 Run command git update-git-for-windows MacOS (with Homebrew) Install Homebrew Run command brew update && brew install git && brew upgrade git TriplyETL Generator \u00b6 The TriplyETL Generator allows you to create new ETL projects in your terminal application. If a TriplyETL project already exists, use the TriplyETL Runner instead. In order to use TriplyETL Generator, you must have: Satisfied the prerequisites . A TriplyETL License Key. Contact info@triply.cc to obtain a License Key for your organization. A user account on a TriplyDB server. Contact info@triply.cc to set up a TriplyDB server for your organization, or create a free account on https://triplydb.com . Run the following command to use the TriplyETL Generator: npx @triply/etl-generator If you use TriplyETL Generator for the first time, this command automatically downloads and installs the latest version on your computer. If you have used TriplyETL Generator in the past, this command automatically updates your installation to the latest version, if one is available. TriplyETL Generator will ask the following questions (the exact sequence of questions depends on the answers given): a. TriplyETL License Key b. Project name c. Target folder d. Dataset name e. TriplyDB API Token f. TriplyDB URL g. 
TriplyDB email h. TriplyDB password Here is an example of a possible run: ? TriplyETL License Key: [hidden] ? Project name: my-etl ? Target folder: my-etl ? Dataset name: my-etl ? Create a new TriplyDB API Token? Yes ? Your TriplyDB URL: my-org.triply.cc ? Your TriplyDB email: my-account@my-organization.com ? Your TriplyDB password: [hidden] \ud83c\udfc1 Your project my-etl is ready for use in my-etl. Go to the target folder that you have specified: cd my-etl You can now use the TriplyETL Runner to run the ETL: npx etl TriplyETL Runner \u00b6 The TriplyETL Runner allows you to run a local TriplyETL project in your terminal application. In order to use TriplyETL Runner, you must have: Satisfied the prerequisites . A user account on a TriplyDB server. Contact info@triply.cc to set up a TriplyDB server for your organization, or create a free account on https://triplydb.com . Perform the following steps to use the TriplyETL Runner: Create a local copy of an existing ETL project. If you do not have access to an existing TriplyETL project yet, use the TriplyETL Generator to create a new one. If you have access to an existing TriplyETL project, use the following command to make a local copy with Git: git clone ssh://git@git.triply.cc:10072/customers/my-org/my-project.git Once you have created a local copy of an existing ETL project, go into the corresponding directory: cd my-project Install the dependencies: npm i Transpile the TypeScript files into JavaScript: npm run build You can now use the TriplyETL Runner: npx etl At this point, you should see a first TriplyETL process in your terminal application. If this is not the case, please contact support@triply.cc to help you out. Visit the TriplyETL CLI documentation to learn more about how you can use the TriplyETL Runner. Visit the TriplyETL CI/CD documentation to learn more about how you can automate TriplyETL runs. TriplyETL Library \u00b6 If you are a software developer that is building a software application in TypeScript, you can include the TriplyETL Library in your project. In order to use the TriplyETL Library, you must have: Satisfied the prerequisites . A TriplyETL License Key. Contact info@triply.cc to obtain a License Key for your organization. A user account on a TriplyDB server. Contact info@triply.cc to set up a TriplyDB server for your organization, or create a free account on https://triplydb.com . Perform the following steps to use the TriplyETL Library: Open the file .npmrc in your text editor, or create the file if it does not yet exist. Add the following content: @triplydb:registry=https://git.triply.cc/api/v4/packages/npm/ @triplyetl:registry=https://git.triply.cc/api/v4/packages/npm/ //git.triply.cc/api/v4/packages/npm/:_authToken={LICENSE_KEY} Replace {LICENSE_KEY} with your TriplyETL License Key. Contact support@triply.cc if you do not have such a license key yet. Run the following command to add the TriplyETL dependency to your package.json file: npm i @triplyetl/etl Open one of the TypeScript files in your software project. When you add the following line to the top of your file, it should be recognized by your TypeScript editor: import { sdo } from '@triplyetl/vocabularies'","title":"Getting Started"},{"location":"triply-etl/generic/getting-started/#triplyetl-getting-started","text":"This page helps you to get started with TriplyETL. You can get started with TriplyETL in any of the following ways: TriplyETL Generator creates a new ETL project based on your answers to a set of question. 
TriplyETL Runner runs an existing ETL project. TriplyETL Library can be included as a dependency in your TypeScript project.","title":"TriplyETL Getting Started"},{"location":"triply-etl/generic/getting-started/#prerequisites","text":"In order to use TriplyETL, you must first install the following programs on your computer: Install Git Go to this link and follow the instructions for your operating system (Windows, macOS, or Linux). Run the following commands to set your user name and email in Git: git config --global user.email \"ada@triply.cc\" git config --global user.name \"Ada Lovelace\" This information will be used in the Git version history for the TriplyETL project. This allows your team to keep track of who made which change. Install Node.js (simple approach) Go to nodejs.org and click on option \u201c18.x.y LTS (Recommended For Most Users)\u201d. This will download the installer for your operating system. Run the installer on your computer. On Windows, you must also select the number of bits on your computer: 32 or 64. The correct number of bits is 64 for almost all Windows computers. Install Node.js (advanced approach) For more advanced us, you can install the Node Version Manager ( nvm ). This allows you to install multiple versions of Node.js on the same computer. See the following links for more information: On Windows You can following the official instructions from Microsoft for installing NVM on Windows. On macOS or Linux You can follow the instructions for installing NVM on any operating system (including macOS and Linux). Find a terminal application You must use a terminal application in order to run commands from the TriplyETL CLI . Here are some examples of terminal applications on different operating systems: On Windows Most Windows versions come with some version of PowerShell preinstalled. You can also follow these instructions by Microsoft to update to the latest version of PowerShell. On macOS Most macOS version come with a Terminal application preinstalled. On Linux Most Linux versions come with a preinstalled terminal application. For example, on Ubuntu the GNOME Terminal application is preinstalled.","title":"Prerequisites"},{"location":"triply-etl/generic/getting-started/#minimum-versions","text":"TriplyETL requires the following minimum versions for the prerequisites: NPM v10.2.1 Node.js v18","title":"Minimum versions"},{"location":"triply-etl/generic/getting-started/#update-the-prerequisites","text":"Update NPM If you have NPM installed, you can run npm -v to see your current version. If you want to upgrade to a different version (for example 10.2.1), you can run the following command: npm install -g npm@10.2.1 For more information see NPM's \"try latest stable version of npm\" documentation. Update Node.js If you have Node.js installed, you can run node -v to see your current version. If you want to upgrade to a different version you need to use a Node package manager (e.g. nvm , n etc.). For more information visit Installing Node.js via package manager . Update Git If you have Git installed, you can run git -v to see your current version, if you want to upgrade to latest version you can: Linux Run command sudo apt-get update && sudo apt-get install git Windows What you need to do depends on your current Git version: Older than 2.14.1 Uninstall Git from your system and reinstall Git for Windows . 
Between 2.14.2 and 2.16.1 Run command git update Greater than or equal to 2.16.1 Run command git update-git-for-windows MacOS (with Homebrew) Install Homebrew Run command brew update && brew install git && brew upgrade git","title":"Update the prerequisites"},{"location":"triply-etl/generic/getting-started/#triplyetl-generator","text":"The TriplyETL Generator allows you to create new ETL projects in your terminal application. If a TriplyETL project already exists, use the TriplyETL Runner instead. In order to use TriplyETL Generator, you must have: Satisfied the prerequisites . A TriplyETL License Key. Contact info@triply.cc to obtain a License Key for your organization. A user account on a TriplyDB server. Contact info@triply.cc to set up a TriplyDB server for your organization, or create a free account on https://triplydb.com . Run the following command to use the TriplyETL Generator: npx @triply/etl-generator If you use TriplyETL Generator for the first time, this command automatically downloads and installs the latest version on your computer. If you have used TriplyETL Generator in the past, this command automatically updates your installation to the latest version, if one is available. TriplyETL Generator will ask the following questions (the exact sequence of questions depends on the answers given): a. TriplyETL License Key b. Project name c. Target folder d. Dataset name e. TriplyDB API Token f. TriplyDB URL g. TriplyDB email h. TriplyDB password Here is an example of a possible run: ? TriplyETL License Key: [hidden] ? Project name: my-etl ? Target folder: my-etl ? Dataset name: my-etl ? Create a new TriplyDB API Token? Yes ? Your TriplyDB URL: my-org.triply.cc ? Your TriplyDB email: my-account@my-organization.com ? Your TriplyDB password: [hidden] \ud83c\udfc1 Your project my-etl is ready for use in my-etl. Go to the target folder that you have specified: cd my-etl You can now use the TriplyETL Runner to run the ETL: npx etl","title":"TriplyETL Generator"},{"location":"triply-etl/generic/getting-started/#triplyetl-runner","text":"The TriplyETL Runner allows you to run a local TriplyETL project in your terminal application. In order to use TriplyETL Runner, you must have: Satisfied the prerequisites . A user account on a TriplyDB server. Contact info@triply.cc to set up a TriplyDB server for your organization, or create a free account on https://triplydb.com . Perform the following steps to use the TriplyETL Runner: Create a local copy of an existing ETL project. If you do not have access to an existing TriplyETL project yet, use the TriplyETL Generator to create a new one. If you have access to an existing TriplyETL project, use the following command to make a local copy with Git: git clone ssh://git@git.triply.cc:10072/customers/my-org/my-project.git Once you have created a local copy of an existing ETL project, go into the corresponding directory: cd my-project Install the dependencies: npm i Transpile the TypeScript files into JavaScript: npm run build You can now use the TriplyETL Runner: npx etl At this point, you should see a first TriplyETL process in your terminal application. If this is not the case, please contact support@triply.cc to help you out. Visit the TriplyETL CLI documentation to learn more about how you can use the TriplyETL Runner. 
Visit the TriplyETL CI/CD documentation to learn more about how you can automate TriplyETL runs.","title":"TriplyETL Runner"},{"location":"triply-etl/generic/getting-started/#triplyetl-library","text":"If you are a software developer that is building a software application in TypeScript, you can include the TriplyETL Library in your project. In order to use the TriplyETL Library, you must have: Satisfied the prerequisites . A TriplyETL License Key. Contact info@triply.cc to obtain a License Key for your organization. A user account on a TriplyDB server. Contact info@triply.cc to set up a TriplyDB server for your organization, or create a free account on https://triplydb.com . Perform the following steps to use the TriplyETL Library: Open the file .npmrc in your text editor, or create the file if it does not yet exist. Add the following content: @triplydb:registry=https://git.triply.cc/api/v4/packages/npm/ @triplyetl:registry=https://git.triply.cc/api/v4/packages/npm/ //git.triply.cc/api/v4/packages/npm/:_authToken={LICENSE_KEY} Replace {LICENSE_KEY} with your TriplyETL License Key. Contact support@triply.cc if you do not have such a license key yet. Run the following command to add the TriplyETL dependency to your package.json file: npm i @triplyetl/etl Open one of the TypeScript files in your software project. When you add the following line to the top of your file, it should be recognized by your TypeScript editor: import { sdo } from '@triplyetl/vocabularies'","title":"TriplyETL Library"},{"location":"triply-etl/generic/internal-store/","text":"On this page: Internal Store Internal Store \u00b6 The internal store is the storage location for linked data that is created by one or more of the following steps: Step 3 Assert uses data from the record to make linked data assertions in the internal store . Step 4 Enrich improves and extends linked data in the internal store. Step 5 Validate ensures that linked data in the internal store meets the specified quality criteria. Every record that is extracted from a data source has its own internal store (decoupling). The size of the internal store is typically small (because at the record level). This is done on purpose, to ensure that a large number of records can be processed in parallel, without using many hardware resources. Once linked data in the internal store is finalized for one record, the following step can be performed: Step 6 Publish takes the linked data from the internal store, and publishes it to a destination such as TriplyDB .","title":"Internal Store"},{"location":"triply-etl/generic/internal-store/#internal-store","text":"The internal store is the storage location for linked data that is created by one or more of the following steps: Step 3 Assert uses data from the record to make linked data assertions in the internal store . Step 4 Enrich improves and extends linked data in the internal store. Step 5 Validate ensures that linked data in the internal store meets the specified quality criteria. Every record that is extracted from a data source has its own internal store (decoupling). The size of the internal store is typically small (because at the record level). This is done on purpose, to ensure that a large number of records can be processed in parallel, without using many hardware resources. 
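As an illustration, the following minimal sketch shows how assertions write into the internal store of the current record. This is a hedged example: it reuses the fromJson extractor and the triple() and iri() assertions that appear elsewhere in this documentation, and it assumes a prefix object and the import location of the assertion functions as introduced in the Declarations and Assert documentation.

import { Etl, fromJson } from '@triplyetl/etl/generic'
import { a, sdo } from '@triplyetl/vocabularies'
// triple() and iri() are the TriplyETL assertion functions used throughout this
// documentation; their import location is described in the Assert documentation.
// `prefix` is assumed to be a prefix declaration object like the ones shown in
// the Declarations documentation.

export default async function (): Promise<Etl> {
  const etl = new Etl()
  etl.use(
    // Steps 1/2: every extracted record gets its own, small internal store.
    fromJson([
      { id: '123', name: 'John' },
      { id: '456', name: 'Jane' },
    ]),
    // Step 3: these assertions only write into the internal store of the record
    // that is currently being processed, so records can be handled in parallel.
    triple(iri(prefix.person, 'id'), a, sdo.Person),
    triple(iri(prefix.person, 'id'), sdo.name, 'name'),
  )
  return etl
}
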
Once linked data in the internal store is finalized for one record, the following step can be performed: Step 6 Publish takes the linked data from the internal store, and publishes it to a destination such as TriplyDB .","title":"Internal Store"},{"location":"triply-etl/generic/maintenance/","text":"On this page: Maintenance Update the TriplyETL dependency Check the current version Check for new versions Assess the impact of updating Perform the update Patch and Minor version update Major version update DTAP configuration Configure CI/CD CI/CD configuration file CI/CD environment variables Understanding Runtime Differences Maintenance \u00b6 Once a TriplyETL repository is configured, it goes into maintenance mode. TriplyETL contains specific functionality to support maintenance. Update the TriplyETL dependency \u00b6 New versions of TriplyETL are released regularly. Moving to a new version is generally a good idea, because it allows new features to be used and will include fixes for known/reported bugs. At the same time, updating to a new version may require you to make some changes to your pipeline. It is important to determine an approach for updating your TriplyETL projects that fits your team and organization. The following sections describe how you can make such a determination. Check the current version \u00b6 The following command prints the TriplyETL version that you are currently using: npm list @triplyetl/etl Check for new versions \u00b6 The following command prints the latest TriplyETL version that is available: npm outdated TriplyETL repositories typically include several developer dependencies as well. These developer dependencies make it easier to write and maintain your ETLs. These developer dependencies are not part of TriplyETL, and must therefore be updated independently of TriplyETL. Assess the impact of updating \u00b6 TriplyETL uses the Semantic Versioning approach: {major}.{minor}.{patch} The impact of updating to a new TriplyETL version can therefore be determined as follows: Patch update Only the {patch} number has increased. This means that one or more bugs have been fixed in a backward compatible manner. You should always be able to perform a patch update without having to make any changes to your configuration. Minor update The {minor} number has increased, but the {major} number is still the same. This means that new functionality was added in a backward compatible manner. You should always be able to perform a minor update without having to make any changes to your configuration. But you may want to check the changelog to see which new functionalities were added. Major update The {major} number has increased. This means that there are incompatible changes. This means that features may have been removed, or existing features may have changed. In such cases, changes to your configuration are almost certainly necessary, and may take some time to implement. Any changes you need to make are described in the changelog . Perform the update \u00b6 Based on the outcome of the previous step, a maintainer of the repository decides which dependencies should be updated to which versions. Since Patch and Minor version updates are always safe to make, we discuss them separately from the more impactful Major version updates. Patch and Minor version update \u00b6 You can update to the latest patch or minor version with the following command: npm up This command may change the contents of the package-lock.json file. 
These changes must be committed and pushed as part of performing the update. Notice that this command will only perform safe (i.e. patch and/or minor) updates. Major version update \u00b6 You can update to the latest major version with the following command: npm i @version This means that the following command is used to update to a specific TriplyETL major version: npm i @triplyetl/etl@3.0.0 This command will change the contents of the package.json file. These changes must be committed and pushed as part of performing the update. DTAP configuration \u00b6 TriplyETL provides out-of-the-box support for the DTAP approach for configuring production systems. DTAP stands for the four environments in which the ETL can run: Development Test Acceptance Production When working on a pipeline it is best to at least run it in the following two modes: Acceptance mode Upload the result of the pipeline to the user account for which the API Token was created. Production mode Upload the result of the pipeline to the organization where the production version of the data is published. Having multiple modes ensures that the production version of a dataset is not accidentally overwritten during development. export function account(): any { switch (Etl.environment) { case 'Development': return undefined case 'Testing': return 'my-org-testing' case 'Acceptance': return 'my-org-acceptance' case 'Production': return 'my-org' } } const etl = new Etl() etl.use( // Your ETL pipeline is configured here. toRdf(Destination.triplyDb.rdf(account(), 'my-dataset')), ) By default, you run the pipeline in Development mode. If you want to run in another mode, you must set the ENV environment variable. You can do this in the .env file of your TriplyETL repository. For example, the following runs the pipeline in Testing mode: ENV=Testing You can also set the ENV variable in the GitLab CI/CD environment. This allows you to automatically run different pipelines, according to the DTAP approach for production systems. Configure CI/CD \u00b6 TriplyETL pipelines can be configured to run automatically in any CI/CD environment. This section explains how you can configure an automated TriplyETL pipeline in GitLab. Notice that the configuration in any other CI/CD environment will be more or less similar to what is explained in this section. CI/CD configuration file \u00b6 The TriplyETL Generator creates a basic configuration file for running TriplyETL in GitLab CI/CD. The configuration file is called .gitlab-ci.yml . The configuration contains a list of stages: stages: - first_stage - second_stage - third_stage These stages will run sequentially. For the above example: the pipeline starts by running the first stage, then runs the second stage, and finally runs the third stage. Within each stage, you can configure one or more TriplyETL scripts. When more then one script is specified for the same stage, these scripts will run in parallel. This allows you to specify any combination of sequential and parallel processes. The following example assumes that the following scripts are present in the TriplyETL repository: - src/ - create_vocabulary.ts - create_dataset_a.ts - create_dataset_b.ts - create_knowledge_graph.ts - .gitlab-ci.yml We want to configure our CI/CD in the following way: Start by creating the vocabulary (script create_vocabulary.ts ). This vocabulary will be used in the validation step of the two scripts that create the two datasets. 
Once the vocabulary is created, create the two datasets (scripts create_dataset_a.ts and create_dataset_b.ts ). The datasets can be created in parallel, but they both require that vocabulary creation is finalized. Once the two datasets are created, create the knowledge graph (script create_knowledge_graph.ts ), which combines the two datasets and the vocabulary in one dataset. This specific configuration looks as follows: create_vocabulary: stage: first_stage interruptible: true allow_failure: false artifacts: !reference [.etl-template, artifacts] script: - !reference [.etl-template, install] - !reference [.etl-template, run-etl] rules: - !reference [.etl-template, rules] create_dataset_a: stage: second_stage interruptible: true allow_failure: false artifacts: !reference [.etl-template, artifacts] script: - !reference [.etl-template, install] - !reference [.etl-template, run-etl] rules: - !reference [.etl-template, rules] create_dataset_b: stage: second_stage interruptible: true allow_failure: false artifacts: !reference [.etl-template, artifacts] script: - !reference [.etl-template, install] - !reference [.etl-template, run-etl] rules: - !reference [.etl-template, rules] create_knowledge_graph: stage: third_stage interruptible: true allow_failure: false artifacts: !reference [.etl-template, artifacts] script: - !reference [.etl-template, install] - !reference [.etl-template, run-etl] rules: - !reference [.etl-template, rules] CI/CD environment variables \u00b6 In a typical ETL, the only variables that need to be present are: ENV (value: acceptance or production), TRIPLYDB_TOKEN (value: the customer's TriplyDB token), PIPELINE_NAME (value: explained below), and optionally TIMEOUT (value: a time description, e.g. \"1H\"). TriplyETL pipelines interpret the following environment variables, which may be specified in the CI/CD environment: ENV When DTAP is used, this environment variable specifies whether the pipeline runs in \"Development\", \"Test\", \"Acceptance\", or in \"Production\". TRIPLYDB_TOKEN The TriplyDB API Token that is used by the automated pipeline, and that allows the pipeline to read from and write to a TriplyDB server. PIPELINE_NAME A descriptive name that is used by GitLab in pipeline overviews. This is specifically useful if you are running multiple pipelines, in which case this descriptive name helps you to distinguish runs. One example of running multiple pipelines is running in DTAP, in which case the descriptive names for the pipelines may be \"Schedule: Acceptance\" and \"Schedule: Production\". TIMEOUT This environment variable can be set to a duration that is shorter than the duration of the pipeline. If a timeout is set and reached, TriplyETL will finish the ETL in an orderly fashion: saving the processed data, saving the performance log files, and saving the generated validation report (if any). This is useful for pipelines that would otherwise be terminated by the CI/CD environment, in which case TriplyETL is terminated immediately, without the ability to save its results before exiting. HEAD The maximum number of records that is processed by the TriplyETL pipeline. This environment variable can be set in test runs that only want to test whether the ETL works for some records, without requiring it to run for all records. For example, in a DTAP Test run this number may be set to 10 to test whether the source can be accessed and the generated data can be uploaded to a TriplyDB server. 
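The ENV variable is also what determines the DTAP mode that an ETL script sees at runtime: it is reflected by Etl.environment (as used in the account() function in the DTAP configuration section above) and by the $environment key that is added to every record. The following minimal sketch reuses fromJson and logRecord from the Record documentation; the value shown in the comment is illustrative and assumes that ENV=Acceptance has been set in the CI/CD environment or in the local .env file.

import { Etl, fromJson, logRecord } from '@triplyetl/etl/generic'

export default async function (): Promise<Etl> {
  const etl = new Etl()
  etl.use(
    fromJson([{ id: '123' }]),
    // With ENV=Acceptance, the logged record contains "$environment": "Acceptance",
    // and Etl.environment returns 'Acceptance'.
    logRecord(),
  )
  return etl
}
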
Understanding Runtime Differences \u00b6 It's important to be aware that runtime differences can occur when comparing TriplyETL pipeline runtimes in different environments, particularly when comparing them to GitLab CI/CD runtimes. There are two main factors that can influence runtime differences: Overhead in CI Jobs : GitLab CI jobs may introduce overhead beyond the actual ETL computation, such as setting up a containerized environment and additional CI-specific steps. A difference of 1 to 5 minutes between GitLab CI and TriplyETL runtimes is normal due to this overhead. Use of copySource() Function : Significant runtime differences exceeding 5 minutes can be attributed to the use of the copySource() function, which operates outside of the ETL application and contributes to the total runtime but not the middleware runtime. If you encounter a runtime difference greater than 5 minutes, and the copySource() function hasn't been used, it is recommended to report the issue to Triply. The issue will be further investigated to identify and address any potential causes. Understanding these factors and taking appropriate action will help you manage your TriplyETL pipelines effectively in a CI/CD environment.","title":"Maintenance"},{"location":"triply-etl/generic/maintenance/#maintenance","text":"Once a TriplyETL repository is configured, it goes into maintenance mode. TriplyETL contains specific functionality to support maintenance.","title":"Maintenance"},{"location":"triply-etl/generic/maintenance/#update-the-triplyetl-dependency","text":"New versions of TriplyETL are released regularly. Moving to a new version is generally a good idea, because it allows new features to be used and will include fixes for known/reported bugs. At the same time, updating to a new version may require you to make some changes to your pipeline. It is important to determine an approach for updating your TriplyETL projects that fits your team and organization. The following sections describe how you can make such a determination.","title":"Update the TriplyETL dependency"},{"location":"triply-etl/generic/maintenance/#check-the-current-version","text":"The following command prints the TriplyETL version that you are currently using: npm list @triplyetl/etl","title":"Check the current version"},{"location":"triply-etl/generic/maintenance/#check-for-new-versions","text":"The following command prints the latest TriplyETL version that is available: npm outdated TriplyETL repositories typically include several developer dependencies as well. These developer dependencies make it easier to write and maintain your ETLs. These developer dependencies are not part of TriplyETL, and must therefore be updated independently of TriplyETL.","title":"Check for new versions"},{"location":"triply-etl/generic/maintenance/#assess-the-impact-of-updating","text":"TriplyETL uses the Semantic Versioning approach: {major}.{minor}.{patch} The impact of updating to a new TriplyETL version can therefore be determined as follows: Patch update Only the {patch} number has increased. This means that one or more bugs have been fixed in a backward compatible manner. You should always be able to perform a patch update without having to make any changes to your configuration. Minor update The {minor} number has increased, but the {major} number is still the same. This means that new functionality was added in a backward compatible manner. You should always be able to perform a minor update without having to make any changes to your configuration. 
But you may want to check the changelog to see which new functionalities were added. Major update The {major} number has increased. This means that there are incompatible changes. This means that features may have been removed, or existing features may have changed. In such cases, changes to your configuration are almost certainly necessary, and may take some time to implement. Any changes you need to make are described in the changelog .","title":"Assess the impact of updating"},{"location":"triply-etl/generic/maintenance/#perform-the-update","text":"Based on the outcome of the previous step, a maintainer of the repository decides which dependencies should be updated to which versions. Since Patch and Minor version updates are always safe to make, we discuss them separately from the more impactful Major version updates.","title":"Perform the update"},{"location":"triply-etl/generic/maintenance/#patch-and-minor-version-update","text":"You can update to the latest patch or minor version with the following command: npm up This command may change the contents of the package-lock.json file. These changes must be committed and pushed as part of performing the update. Notice that this command will only perform safe (i.e. patch and/or minor) updates.","title":"Patch and Minor version update"},{"location":"triply-etl/generic/maintenance/#major-version-update","text":"You can update to the latest major version with the following command: npm i @version This means that the following command is used to update to a specific TriplyETL major version: npm i @triplyetl/etl@3.0.0 This command will change the contents of the package.json file. These changes must be committed and pushed as part of performing the update.","title":"Major version update"},{"location":"triply-etl/generic/maintenance/#dtap-configuration","text":"TriplyETL provides out-of-the-box support for the DTAP approach for configuring production systems. DTAP stands for the four environments in which the ETL can run: Development Test Acceptance Production When working on a pipeline it is best to at least run it in the following two modes: Acceptance mode Upload the result of the pipeline to the user account for which the API Token was created. Production mode Upload the result of the pipeline to the organization where the production version of the data is published. Having multiple modes ensures that the production version of a dataset is not accidentally overwritten during development. export function account(): any { switch (Etl.environment) { case 'Development': return undefined case 'Testing': return 'my-org-testing' case 'Acceptance': return 'my-org-acceptance' case 'Production': return 'my-org' } } const etl = new Etl() etl.use( // Your ETL pipeline is configured here. toRdf(Destination.triplyDb.rdf(account(), 'my-dataset')), ) By default, you run the pipeline in Development mode. If you want to run in another mode, you must set the ENV environment variable. You can do this in the .env file of your TriplyETL repository. For example, the following runs the pipeline in Testing mode: ENV=Testing You can also set the ENV variable in the GitLab CI/CD environment. This allows you to automatically run different pipelines, according to the DTAP approach for production systems.","title":"DTAP configuration"},{"location":"triply-etl/generic/maintenance/#configure-cicd","text":"TriplyETL pipelines can be configured to run automatically in any CI/CD environment. 
This section explains how you can configure an automated TriplyETL pipeline in GitLab. Notice that the configuration in any other CI/CD environment will be more or less similar to what is explained in this section.","title":"Configure CI/CD"},{"location":"triply-etl/generic/maintenance/#cicd-configuration-file","text":"The TriplyETL Generator creates a basic configuration file for running TriplyETL in GitLab CI/CD. The configuration file is called .gitlab-ci.yml . The configuration contains a list of stages: stages: - first_stage - second_stage - third_stage These stages will run sequentially. For the above example: the pipeline starts by running the first stage, then runs the second stage, and finally runs the third stage. Within each stage, you can configure one or more TriplyETL scripts. When more then one script is specified for the same stage, these scripts will run in parallel. This allows you to specify any combination of sequential and parallel processes. The following example assumes that the following scripts are present in the TriplyETL repository: - src/ - create_vocabulary.ts - create_dataset_a.ts - create_dataset_b.ts - create_knowledge_graph.ts - .gitlab-ci.yml We want to configure our CI/CD in the following way: Start by creating the vocabulary (script create_vocabulary.ts ). This vocabulary will be used in the validation step of the two scripts that create the two datasets. Once the vocabulary is created, create the two datasets (scripts create_dataset_a.ts and create_dataset_b.ts ). The datasets can be created in parallel, but they both require that vocabulary creation is finalized. Once the two datasets are created, create the knowledge graph (script create_knowledge_graph.ts ), which combines the two datasets and the vocabulary in one dataset. This specific configuration looks as follows: create_vocabulary: stage: first_stage interruptible: true allow_failure: false artifacts: !reference [.etl-template, artifacts] script: - !reference [.etl-template, install] - !reference [.etl-template, run-etl] rules: - !reference [.etl-template, rules] create_dataset_a: stage: second_stage interruptible: true allow_failure: false artifacts: !reference [.etl-template, artifacts] script: - !reference [.etl-template, install] - !reference [.etl-template, run-etl] rules: - !reference [.etl-template, rules] create_dataset_b: stage: second_stage interruptible: true allow_failure: false artifacts: !reference [.etl-template, artifacts] script: - !reference [.etl-template, install] - !reference [.etl-template, run-etl] rules: - !reference [.etl-template, rules] create_knowledge_graph: stage: third_stage interruptible: true allow_failure: false artifacts: !reference [.etl-template, artifacts] script: - !reference [.etl-template, install] - !reference [.etl-template, run-etl] rules: - !reference [.etl-template, rules]","title":"CI/CD configuration file"},{"location":"triply-etl/generic/maintenance/#cicd-environment-variables","text":"In a normal ETL the only variables that should be present AFAIK are: ENV (value: acceptance or production), TRIPLYDB_TOKEN (value: customer's TriplyDB token), PIPELINE_NAME (value: explained below), and optionally TIMEOUT (value: time description e.g. \"1H\") TriplyETL pipelines interpret the following environment variables, that may be specified in the CI/CD environment: ENV When DTAP is used, this environment variable specifies whether the pipeline runs in \"Development\", \"Test\", \"Acceptance\", or in \"Production\". 
TRIPLYDB_TOKEN The TriplyDB API Token that is used by the automated pipeline, and that allows the pipeline to read from and write to a TriplyDB server. PIPELINE_NAME A descriptive name that is used by GitLab in pipeline overviews. This is specifically useful if you are running multiple pipelines, in which case this descriptive name helps you to distinguish runs. One example of running multiple pipelines is running in DTAP; in which case the descriptive names for the pipelines may be \"Schedule: Acceptance\" and \"Schedule: Production\". TIMEOUT This environment variable can be set to a duration that is shorted than the duration of the pipeline. If a timeout is set and reached, TriplyETL will finish the ETL in an orderly fashion: saving the processed data, saving the performance log files, and saving the generated validation report (if any). This is useful for pipelines that would otherwise be terminated by the CI/CD environment, in which case TriplyETL is terminated immediately, without having the ability to nicely save before exiting. HEAD The maximum number of records that is being processed by the TriplyETL pipeline. This environment variable can be set in test runs that only want to test whether the ETL works for some records, without requiring it to run for all records. For example, in a DTAP Test run this number may be set to 10 to test whether the source can be accessed and the generated data can be uploaded to a TriplyDB server.","title":"CI/CD environment variables"},{"location":"triply-etl/generic/maintenance/#understanding-runtime-differences","text":"It's important to be aware that runtime differences can occur when comparing TriplyETL pipeline runtimes in different environments, particularly when comparing them to GitLab CI/CD runtimes. There are two main factors that can influence runtime differences: Overhead in CI Jobs : GitLab CI jobs may introduce overhead beyond the actual ETL computation, such as setting up a containerized environment and additional CI-specific steps. A difference of 1 to 5 minutes between GitLab CI and TriplyETL runtimes is normal due to this overhead. Use of copySource() Function : Significant runtime differences exceeding 5 minutes can be attributed to the use of the copySource() function, which operates outside of the ETL application and contributes to the total runtime but not the middleware runtime. If you encounter a runtime difference greater than 5 minutes, and the copySource() function hasn't been used, it is recommended to report the issue to Triply. The issue will be further investigated to identify and address any potential causes. Understanding these factors and taking appropriate action will help you manage your TriplyETL pipelines effectively in a CI/CD environment.","title":"Understanding Runtime Differences"},{"location":"triply-etl/generic/record/","text":"On this page: Record The generic Record Extractor loadRecords() Special keys Special key $recordId Use case: Unique identifiers Use case: Debugging Special key $environment Special key $sheetName Record \u00b6 When a TriplyETL is connected to one of more data sources, a stream of Records will be generated. Records use a generic representation that is independent of the format used in the data sources. The generic Record \u00b6 We illustrate the representation of the generic Record with the following code snippet. 
This snippet uses extractor fromJson() to extract data from inline JSON source data: import { Etl, fromJson, logRecord } from '@triplyetl/etl/generic' export default async function (): Promise { const etl = new Etl() etl.use( fromJson([ { id: '123', name: 'John' }, { id: '456', name: 'Jane' }, ]), logRecord(), ) return etl } Debug function logRecord() prints the current record to standard output. When this pipeline is run, the two inline records are printed as follows: { \"id\": \"123\", \"name\": \"John\", \"$recordId\": 1, \"$environment\": \"Development\" } { \"id\": \"456\", \"name\": \"Jane\", \"$recordId\": 2, \"$environment\": \"Development\" } Notice that TriplyETL adds two keys to both records: $recordId and $environment (see Special Key for more information). Now suppose that we change the source system. We no longer use inline JSON, but a local XML file. The contents of the XML file are as follows: 123 John 456 Jane Let us change the TriplyETL script to use extractor fromXml() and the local file source type: import { Etl, fromXml, logRecord } from '@triplyetl/etl/generic' export default async function (): Promise { const etl = new Etl() etl.use( fromXml(Source.file('example.xml')), logRecord(), ) return etl } This new script logs the following two records: { \"id\": \"123\", \"name\": \"John\", \"$recordId\": 1, \"$environment\": \"Development\" } { \"id\": \"456\", \"name\": \"Jane\", \"$recordId\": 2, \"$environment\": \"Development\" } Notice that the two records that are logged from an XML source are completely identical to the two records that were previously logged from a JSON source. This is an essential property of TriplyETL: it treats data from any source system in the same way, using the same intermediary Record format. This makes it easy to write pipelines that process data from a large number of different data sources. This also makes replacing a data source in one format with a data source in another format a relatively cheap operation. More often than not, only the source extractor needs to be changed, and all transformations and assertions remain as they were. Extractor loadRecords() \u00b6 The loadRecords() function allows us to run a sub ETL and store its records to the main ETL. It is used when we would like to add additional data from different source to the main ETL. The function expects two arguments and can be run with the following snippet: fromSrc - The Source to load the data from. The list of available extractors can be seen in data sources overview page . key - A new key where the records are stored. loadRecords(fromSrc, 'key'), It is important to call the loadRecords() function after loading record data in the main ETL. The following code snippet extracts records from a json object (main ETL), then extracts records from a json file ( tableMap.json ) stored as an asset and stores them in the key _table in the record of the main ETL: fromJson({ country: 'be' }), loadRecords(fromJson(Source.TriplyDb.asset('test', { name: 'tableMap.json' })), '_table'), The combined record looks as following: { \"country\": \"be\", \"$recordId\": 1, \"$environment\": \"Development\", \"_table\": [ { \"be\": \"http://ex.com/Belgium\", \"nl\": \"http://ex.com/Netherlands\", \"de\": \"http://ex.com/Germany\", \"en\": \"http://ex.com/England\", \"$recordId\": 1, \"$environment\": \"Development\", \"$fileName\": \"tableMap.json\" } ] } Special keys \u00b6 Records in TriplyETL contain several special keys. These special keys start with a dollar sign character ( $ ). 
The special keys contain values that are inserted during the Extract step. These special keys can be used in the same way as regular keys in your TriplyETL configuration. We now discuss these special keys in detail. Special key $recordId \u00b6 The special key $recordId assigns a unique number to every record that is processed in one single run of a TriplyETL pipeline. If the source data does not change, multiple runs of the TriplyETL pipeline will always generate the same record IDs. However, if source data changes, multiple runs of the TriplyETL pipeline may generate different record IDs for the same record. Use case: Unique identifiers \u00b6 The first main use case of the $recordId key is to create IRIs that are unique within one single run of a TriplyETL pipeline. Suppose the following table is our source data: First name Last name John Doe Jane Doe John Doe We need to create an IRI for every person in this table. Notice that the table contains no unique properties: there are two different persons with the same first and last name. This means that we cannot use the keys \"First name\" and \"Last name\" in our record in order to create our IRIs. Luckily, the source connector adds the $recordId for us: { \"First name\": \"John\", \"Last name\": \"Doe\", \"$recordId\": 1 } { \"First name\": \"Jane\", \"Last name\": \"Doe\", \"$recordId\": 2 } { \"First name\": \"John\", \"Last name\": \"Doe\", \"$recordId\": 3 } This allows us to make the following assertion: pairs(iri(prefix.id, '$recordId'), [a, sdo.Person], [sdo.givenName, 'First name'], [sdo.familyName, 'Last name'], ), Which results in the following linked data: id:1 a sdo:Person; sdo:givenName 'John'; sdo:familyName 'Doe'. id:2 a sdo:Person; sdo:givenName 'Jane'; sdo:familyName 'Doe'. id:3 a sdo:Person; sdo:givenName 'John'; sdo:familyName 'Doe'. Notice that the use of the $recordId results in a correct single run of the TriplyETL pipeline. But if the source data changes, the IRIs may change as well. For example, if the first and second row in the source table are swapped, the IRI that denotes \"Jane Doe\" will change from id:2 to id:1 . Use case: Debugging \u00b6 When you are debugging the configuration of a TriplyETL pipeline, it is sometimes useful to perform a specific actions for a specific record. Assuming the stream of records is stable during the debugging effort, the $recordId key can be used to perform such a debugging action; for example: whenEqual('$recordId', 908, logRecord()), Do note that it is generally better to run the TriplyETL for a specific record using the --from-record-id 908 --head 1 command line flags (see CLI ). Special key $environment \u00b6 The TriplyETL record contains special key $environment . Its value denotes the DTAP environment that the pipeline is currently running in. This is one of the following values: \"Development\", \"Test\", \"Acceptance\", or \"Production\". Special key $sheetName \u00b6 The special key $sheetName only occurs in records that original from data source that use the Microsoft Excel format. In such records, this special key contains the name of the sheet from which the record originates. See the documentation for the Microsoft Excel format for more information about this special key.","title":"Record"},{"location":"triply-etl/generic/record/#record","text":"When a TriplyETL is connected to one of more data sources, a stream of Records will be generated. 
Records use a generic representation that is independent of the format used in the data sources.","title":"Record"},{"location":"triply-etl/generic/record/#the-generic-record","text":"We illustrate the representation of the generic Record with the following code snippet. This snippet uses extractor fromJson() to extract data from inline JSON source data: import { Etl, fromJson, logRecord } from '@triplyetl/etl/generic' export default async function (): Promise { const etl = new Etl() etl.use( fromJson([ { id: '123', name: 'John' }, { id: '456', name: 'Jane' }, ]), logRecord(), ) return etl } Debug function logRecord() prints the current record to standard output. When this pipeline is run, the two inline records are printed as follows: { \"id\": \"123\", \"name\": \"John\", \"$recordId\": 1, \"$environment\": \"Development\" } { \"id\": \"456\", \"name\": \"Jane\", \"$recordId\": 2, \"$environment\": \"Development\" } Notice that TriplyETL adds two keys to both records: $recordId and $environment (see Special Key for more information). Now suppose that we change the source system. We no longer use inline JSON, but a local XML file. The contents of the XML file are as follows: 123 John 456 Jane Let us change the TriplyETL script to use extractor fromXml() and the local file source type: import { Etl, fromXml, logRecord } from '@triplyetl/etl/generic' export default async function (): Promise { const etl = new Etl() etl.use( fromXml(Source.file('example.xml')), logRecord(), ) return etl } This new script logs the following two records: { \"id\": \"123\", \"name\": \"John\", \"$recordId\": 1, \"$environment\": \"Development\" } { \"id\": \"456\", \"name\": \"Jane\", \"$recordId\": 2, \"$environment\": \"Development\" } Notice that the two records that are logged from an XML source are completely identical to the two records that were previously logged from a JSON source. This is an essential property of TriplyETL: it treats data from any source system in the same way, using the same intermediary Record format. This makes it easy to write pipelines that process data from a large number of different data sources. This also makes replacing a data source in one format with a data source in another format a relatively cheap operation. More often than not, only the source extractor needs to be changed, and all transformations and assertions remain as they were.","title":"The generic Record"},{"location":"triply-etl/generic/record/#extractor-loadrecords","text":"The loadRecords() function allows us to run a sub ETL and store its records to the main ETL. It is used when we would like to add additional data from different source to the main ETL. The function expects two arguments and can be run with the following snippet: fromSrc - The Source to load the data from. The list of available extractors can be seen in data sources overview page . key - A new key where the records are stored. loadRecords(fromSrc, 'key'), It is important to call the loadRecords() function after loading record data in the main ETL. 
The following code snippet extracts records from a json object (main ETL), then extracts records from a json file ( tableMap.json ) stored as an asset and stores them in the key _table in the record of the main ETL: fromJson({ country: 'be' }), loadRecords(fromJson(Source.TriplyDb.asset('test', { name: 'tableMap.json' })), '_table'), The combined record looks as following: { \"country\": \"be\", \"$recordId\": 1, \"$environment\": \"Development\", \"_table\": [ { \"be\": \"http://ex.com/Belgium\", \"nl\": \"http://ex.com/Netherlands\", \"de\": \"http://ex.com/Germany\", \"en\": \"http://ex.com/England\", \"$recordId\": 1, \"$environment\": \"Development\", \"$fileName\": \"tableMap.json\" } ] }","title":"Extractor loadRecords()"},{"location":"triply-etl/generic/record/#special-keys","text":"Records in TriplyETL contain several special keys. These special keys start with a dollar sign character ( $ ). The special keys contain values that are inserted during the Extract step. These special keys can be used in the same way as regular keys in your TriplyETL configuration. We now discuss these special keys in detail.","title":"Special keys"},{"location":"triply-etl/generic/record/#special-key-recordid","text":"The special key $recordId assigns a unique number to every record that is processed in one single run of a TriplyETL pipeline. If the source data does not change, multiple runs of the TriplyETL pipeline will always generate the same record IDs. However, if source data changes, multiple runs of the TriplyETL pipeline may generate different record IDs for the same record.","title":"Special key $recordId"},{"location":"triply-etl/generic/record/#use-case-unique-identifiers","text":"The first main use case of the $recordId key is to create IRIs that are unique within one single run of a TriplyETL pipeline. Suppose the following table is our source data: First name Last name John Doe Jane Doe John Doe We need to create an IRI for every person in this table. Notice that the table contains no unique properties: there are two different persons with the same first and last name. This means that we cannot use the keys \"First name\" and \"Last name\" in our record in order to create our IRIs. Luckily, the source connector adds the $recordId for us: { \"First name\": \"John\", \"Last name\": \"Doe\", \"$recordId\": 1 } { \"First name\": \"Jane\", \"Last name\": \"Doe\", \"$recordId\": 2 } { \"First name\": \"John\", \"Last name\": \"Doe\", \"$recordId\": 3 } This allows us to make the following assertion: pairs(iri(prefix.id, '$recordId'), [a, sdo.Person], [sdo.givenName, 'First name'], [sdo.familyName, 'Last name'], ), Which results in the following linked data: id:1 a sdo:Person; sdo:givenName 'John'; sdo:familyName 'Doe'. id:2 a sdo:Person; sdo:givenName 'Jane'; sdo:familyName 'Doe'. id:3 a sdo:Person; sdo:givenName 'John'; sdo:familyName 'Doe'. Notice that the use of the $recordId results in a correct single run of the TriplyETL pipeline. But if the source data changes, the IRIs may change as well. For example, if the first and second row in the source table are swapped, the IRI that denotes \"Jane Doe\" will change from id:2 to id:1 .","title":"Use case: Unique identifiers"},{"location":"triply-etl/generic/record/#use-case-debugging","text":"When you are debugging the configuration of a TriplyETL pipeline, it is sometimes useful to perform a specific actions for a specific record. 
Assuming the stream of records is stable during the debugging effort, the $recordId key can be used to perform such a debugging action; for example: whenEqual('$recordId', 908, logRecord()), Do note that it is generally better to run the TriplyETL for a specific record using the --from-record-id 908 --head 1 command line flags (see CLI ).","title":"Use case: Debugging"},{"location":"triply-etl/generic/record/#special-key-environment","text":"The TriplyETL record contains the special key $environment . Its value denotes the DTAP environment that the pipeline is currently running in. This is one of the following values: \"Development\", \"Test\", \"Acceptance\", or \"Production\".","title":"Special key $environment"},{"location":"triply-etl/generic/record/#special-key-sheetname","text":"The special key $sheetName only occurs in records that originate from data sources that use the Microsoft Excel format. In such records, this special key contains the name of the sheet from which the record originates. See the documentation for the Microsoft Excel format for more information about this special key.","title":"Special key $sheetName"},{"location":"triply-etl/generic/skolem-iris/","text":"On this page: Skolem IRIs What are Skolem IRIs? Why does TriplyETL use Skolem IRIs? Skolem IRIs are a lossless approach An illustrative example Skolem IRIs \u00b6 TriplyETL uses Skolem IRIs instead of blank nodes. This approach is consistent with the RDF 1.1 standard. This page details why TriplyETL uses Skolem IRIs, and shows how they are used to stand in for blank nodes in a generic and standards-compliant way. What are Skolem IRIs? \u00b6 Skolem IRIs are IRIs that are used to systematically stand in for blank nodes. Whenever a blank node occurs in linked data, it is allowed to be replaced by a Skolem IRI. Skolem IRIs are guaranteed to be universally unique, while blank nodes are only guaranteed to be unique within the context in which they occur. Why does TriplyETL use Skolem IRIs? \u00b6 Before linked data that contains blank nodes can be used, all blank nodes in that linked data must be renamed in order to avoid name collisions. Since Skolem IRIs are universally unique, there is no such requirement when using linked data that contains Skolem IRIs instead of blank nodes. TriplyETL uses Skolem IRIs instead of blank nodes, because this makes the linked data that TriplyETL creates easier to use. This ease of use applies to processing inside TriplyETL, but also applies to the use of linked data produced by TriplyETL after publication. Skolem IRIs are a lossless approach \u00b6 Since Skolem IRIs are required to use a specific path prefix (i.e. /.well-known/genid/ ), users of linked data containing Skolem IRIs are able to distinguish them from other IRIs. As such, it is possible to systematically replace Skolem IRIs with blank nodes again, since the translation from and to Skolem IRIs does not lose any information. Notice that while there are no benefits to replacing Skolem IRIs with blank nodes, only downsides, some users may still wish to perform such replacements. An illustrative example \u00b6 We show the downsides of linked data that contains blank nodes, and the benefits of linked data that contains Skolem IRIs that replace blank nodes, with an example. The following two linked data snippets use the same blank node label ('eFsgehcX9k25dv'): prefix ns: ns:product ns:height _:eFsgehcX9k25dv. _:eFsgehcX9k25dv ns:unitOfMeasure ns:meter; ns:value 1.1e0. and: prefix ns: ns:product ns:width _:eFsgehcX9k25dv.
_:eFsgehcX9k25dv ns:unitOfMeasure ns:centimeter; ns:value 1.5e0. Since the blank node label occurs in two different contexts, we are not allowed to naively combine these two snippets. In fact, if we were to naively combine them, we would end up with the following incorrect information: prefix ns: ns:product ns:height _:eFsgehcX9k25dv; ns:width _:eFsgehcX9k25dv. _:eFsgehcX9k25dv ns:unitOfMeasure ns:centimeter, ns:meter; ns:value 1.1e0, 1.5e0. Notice that in the combined snippet, it is no longer possible to determine what is the height and what is the width of the product. Neither is it possible to determine which unit of measure belongs to which numeric value. In order to avoid this issue with blank nodes, users are required to systematically rename them whenever they make use of the data. For example, in order to combine the two original linked data snippets, their blank node labels must first be renamed: prefix ns: ns:product ns:height _:eFsgehcX9k25dv_renamed1; ns:width _:eFsgehcX9k25dv_renamed2. _:eFsgehcX9k25dv_renamed1 ns:unitOfMeasure ns:meter; ns:value 1.1e0. _:eFsgehcX9k25dv_renamed2 ns:unitOfMeasure ns:centimeter; ns:value 1.5e0. This renaming of blank nodes must be performed every time the data is used . This is very cumbersome, and many users of linked data are unable to perform such renaming operations reliably. Now we show the same example, but by first replacing all blank nodes with Skolem IRIs. We start with the following two linked data snippets: prefix ns: prefix skolem: ns:product ns:height skolem:eFsgehcX9k25dv. skolem:eFsgehcX9k25dv ns:unitOfMeasure ns:meter; ns:value 1.1e0. and: prefix ns: prefix skolem: ns:product ns:width skolem:JmsR9ev5QgHZyx. skolem:JmsR9ev5QgHZyx ns:unitOfMeasure ns:centimeter; ns:value 1.5e0. Notice that we are able to use these linked data snippets directly, without having to perform a renaming operation. For example, we can naively combine the two snippets into one: prefix ns: prefix skolem: ns:product ns:height skolem:eFsgehcX9k25dv; ns:width skolem:JmsR9ev5QgHZyx. skolem:eFsgehcX9k25dv ns:unitOfMeasure ns:meter; ns:value 1.1e0. skolem:JmsR9ev5QgHZyx ns:unitOfMeasure ns:centimeter; ns:value 1.5e0.","title":"Skolem IRIs"},{"location":"triply-etl/generic/skolem-iris/#skolem-iris","text":"TriplyETL uses Skolem IRIs instead of blank nodes. This approach is consistent with the RDF 1.1 standard. This page details why TriplyETL uses Skolem IRIs, and shows how they are used to stand in for blank nodes in a generic and standards-compliant way.","title":"Skolem IRIs"},{"location":"triply-etl/generic/skolem-iris/#what-are-skolem-iris","text":"Skolem IRIs are IRIs that are used to systematically stand in for blank nodes. Whenever a blank node occurs in linked data, it is allowed to be replaced by a Skolem IRI. Skolem IRIs are guaranteed to be universally unique, while blank nodes are only guaranteed to be unique within the context in which they occur.","title":"What are Skolem IRIs?"},{"location":"triply-etl/generic/skolem-iris/#why-does-triplyetl-use-skolem-iris","text":"Before linked data that contains blank nodes can be used, all blank nodes in that linked data must be renamed in order to avoid name collisions. Since Skolem IRIs are universally unique, there is no such requirement when using linked data that contains Skolem IRIs instead of blank nodes. TriplyETL uses Skolem IRIs instead of blank nodes, because this makes the linked data that TriplyETL creates easier to use.
This ease of use applies to processing inside TriplyETL, but also applies to the use of linked data produced by TriplyETL after publication.","title":"Why does TriplyETL use Skolem IRIs?"},{"location":"triply-etl/generic/skolem-iris/#skolem-iris-are-a-lossless-approach","text":"Since Skolem IRIs are required to use a specific path prefix (i.e. /.well-known/genid/ ), users of linked data containing Skolem IRIs are able to distinguish them from other IRIs. As such, it is possible to systematically replace Skolem IRIs with blank nodes again, since the translation from and to Skolem IRIs does not lose any information. Notice that while there are no benefits to replacing Skolem IRIs with blank nodes, only downsides, some users may still wish to perform such replacements.","title":"Skolem IRIs are a lossless approach"},{"location":"triply-etl/generic/skolem-iris/#an-illustrative-example","text":"We show the downsides of linked data that contains blank nodes, and the benefits of linked data that contains Skolem IRIs that replace blank nodes, with an example. The following two linked data snippets use the same blank node label ('eFsgehcX9k25dv'): prefix ns: ns:product ns:height _:eFsgehcX9k25dv. _:eFsgehcX9k25dv ns:unitOfMeasure ns:meter; ns:value 1.1e0. and: prefix ns: ns:product ns:width _:eFsgehcX9k25dv. _:eFsgehcX9k25dv ns:unitOfMeasure ns:centimeter; ns:value 1.5e0. Since the blank node label occurs in two different contexts, we are not allowed to naively combine these two snippets. In fact, if we were to naively combine them, we would end up with the following incorrect information: prefix ns: ns:product ns:height _:eFsgehcX9k25dv; ns:width _:eFsgehcX9k25dv. _:eFsgehcX9k25dv ns:unitOfMeasure ns:centimeter, ns:meter; ns:value 1.1e0, 1.5e0. Notice that in the combined snippet, it is no longer possible to determine what is the height and what is the width of the product. Neither is it possible to determine which unit of measure belongs to which numeric value. In order to avoid this issue with blank nodes, users are required to systematically rename them whenever they make use of the data. For example, in order to combine the two original linked data snippets, their blank node labels must first be renamed: prefix ns: ns:product ns:height _:eFsgehcX9k25dv_renamed1; ns:width _:eFsgehcX9k25dv_renamed2. _:eFsgehcX9k25dv_renamed1 ns:unitOfMeasure ns:meter; ns:value 1.1e0. _:eFsgehcX9k25dv_renamed2 ns:unitOfMeasure ns:centimeter; ns:value 1.5e0. This renaming of blank nodes must be performed every time the data is used . This is very cumbersome, and many users of linked data are unable to perform such renaming operations reliably. Now we show the same example, but by first replacing all blank nodes with Skolem IRIs. We start with the following two linked data snippets: prefix ns: prefix skolem: ns:product ns:height skolem:eFsgehcX9k25dv. skolem:eFsgehcX9k25dv ns:unitOfMeasure ns:meter; ns:value 1.1e0. and: prefix ns: prefix skolem: ns:product ns:width skolem:JmsR9ev5QgHZyx. skolem:JmsR9ev5QgHZyx ns:unitOfMeasure ns:centimeter; ns:value 1.5e0. Notice that we are able to use these linked data snippets directly, without having to perform a renaming operation. For example, we can naively combine the two snippets into one: prefix ns: prefix skolem: ns:product ns:height skolem:eFsgehcX9k25dv; ns:width skolem:JmsR9ev5QgHZyx. skolem:eFsgehcX9k25dv ns:unitOfMeasure ns:meter; ns:value 1.1e0.
skolem:JmsR9ev5QgHZyx ns:unitOfMeasure ns:centimeter; ns:value 1.5e0.","title":"An illustrative example"},{"location":"triply-etl/generic/vocabularies/","text":"On this page: Supported vocabularies Supported vocabularies \u00b6 TriplyETL includes out-of-the-box support for a large number of external vocabularies, enumerated in the table below. If you miss an external vocabulary in that table, then let us know via: support@triply.cc With the latest update, TriplyETL vocabularies are now represented as Vocabulary objects, replacing the previous usage of objects with the type IRI . This change may necessitate adjustments to existing ETLs that utilize static vocabularies, such as aat . In this case, the vocabulary would need to be updated to aat.toIri() to ensure compatibility with the correct type. See the external vocabularies section for more information on how to use external vocabularies in ETL configuration. The following table lists the currently supported vocabularies: Name Version Use cases Description Argument Model Ontology (AMO) 1.0 Fake news detection, argumentation structure An ontology for describing argumentation according to Toulmin's argumentation model. Bibliographic Ontology Specification (BIBO) no version info Libraries, citation graphs, bibliography The Bibliographic Ontology Specification provides main concepts and properties for describing citations and bibliographic references (i.e. quotes, books, articles, etc) on the Semantic Web. Building Topology Ontology (BOT) 0.3.2 Buildings The Building Topology Ontology (BOT) is a minimal ontology for describing the core topological concepts of a building. Brick: A uniform metadata schema for buildings no version info Buildings Brick is an open-source effort to standardize semantic descriptions of the physical, logical and virtual assets in buildings and the relationships between them. Cultural Heritage Ontology (CEO) 1.41 Cultural heritage The CEO is the complete semantic representation of the logical data models CHO and KENNIS from the data layer of the RCE. Conceptual Reference Model (CRM) 7.1.2 Cultural heritage The CIDOC Conceptual Reference Model (CRM) provides definitions and a formal structure for describing the implicit and explicit concepts and relationships used in cultural heritage documentation. Conceptual Reference Model (CRM) - Digital no version info Digitization products An ontology and RDF Schema to encode metadata about the steps and methods of production (\u201cprovenance\u201d) of digitization products and synthetic digital representations such as 2D, 3D or even animated Models created by various technologies. Its distinct features compared to competitive models is the complete inclusion of the initial physical measurement processes and their parameters. Conceptual Reference Model (CRM) - PC no version info Cultural heritage CIDOC CRM v7.1.2 module for the implementation of properties of properties in RDFs. DBpedia Ontology 1.0.0 DBpedia Ontology for DBpedia Data Catalog Vocabulary (DCAT) 2.0.0 Data catalogs, datasets DCAT is an RDF vocabulary designed to facilitate interoperability between data catalogs published on the Web. Dublin Core Type Vocabulary 2012-06-14 Classes The DCMI Type Vocabulary was created in 2001. It defines classes for basic types of thing that can be described using DCMI metadata terms. 
Dublin Core Terms 1.1.0 Metadata terms This document is an up-to-date specification of all metadata terms maintained by the Dublin Core Metadata Initiative, including properties, vocabulary encoding schemes, syntax encoding schemes, and classes. Document Elements Ontology (DEO) 2015-07-03 Rhetorical elements within documents DEO, The Discourse Elements Ontology, is an ontology written in OWL 2 DL that provides a structured vocabulary for rhetorical elements within documents (e.g. Introduction, Discussion, Acknowledgements, Reference List, Figures, Appendix), enabling these to be described in RDF. It uses some of the rhetorical block elements from the SALT Rhetorical Ontology and the Ontology of Rhetorical Blocks. Document Components Ontology (DoCo) 1.3.0 Document components The Document Components Ontology (DoCO) in an ontology that provides a structured vocabulary written of document components, both structural (e.g., block, inline, paragraph, section, chapter) and rhetorical (e.g., introduction, discussion, acknowledgements, reference list, figure, appendix). ERA Vocabulary 2022-02-02 Railway infrastructure Vocabulary defined by the European Union Agency for Railways to describe the concepts and relationships related to the European railway infrastructure and the vehicles authorized to operate over it. FRBR-aligned Bibliographic Ontology (FaBiO) no version info Publishing, bibliography, textual publications An ontology for recording and publishing on the Semantic Web descriptions of entities that are published or potentially publishable, and that contain or are referred to by bibliographic references, or entities used to define such bibliographic references. Friend of a Friend (FOAF) 0.1.0 People, information FOAF is a project devoted to linking people and information using the Web. Regardless of whether information is in people's heads, in physical or digital documents, or in the form of factual data, it can be linked. FOAF integrates three kinds of network: social networks of human collaboration, friendship and association; representational networks that describe a simplified view of a cartoon universe in factual terms, and information networks that use Web-based linking to share independently published descriptions of this inter-connected world. FOAF does not compete with socially-oriented Web sites; rather it provides an approach in which different sites can tell different parts of the larger story, and by which users can retain some control over their information in a non-proprietary format. Functional Requirements for Bibliographic Records (FRBR) 2005-08-10 Bibliography This vocabulary is an expression in RDF of the concepts and relations described in the IFLA report on the Functional Requirements for Bibliographic Records (FRBR). GeoSPARQL 1.0 Geospatial data The OGC GeoSPARQL standard supports representing and querying geospatial data on the Semantic Web. GeoSPARQL defines a vocabulary for representing geospatial data in RDF, and it defines an extension to the SPARQL query language for processing geospatial data. In addition, GeoSPARQL is designed to accommodate systems based on qualitative spatial reasoning and systems based on quantitative spatial computations. Geography Markup Language (GML) Encoding Standard 3.2.1 Geography XML grammar for expressing geographical features. GML serves as a modeling language for geographic systems as well as an open interchange format for geographic transactions on the Internet. 
Getty Vocabulary Program (GVP) 3.3.0 Classes, properties and values in GVP LOD The GVP Ontology defines classes, properties and values ( skos:Concept s) used in GVP LOD. Linked Art no version info Cultural heritage Linked Art describes cultural heritage resources, with a focus on artworks and museum-oriented activities. It defines common patterns and terms to ensure that the resulting data can be easily used and is based on real-world data and use cases. Metagegevens voor duurzaam toegankelijke overheidsinformatie (MDTO) 1.0 Government information MDTO (Metadata for sustainably accessible government information) is a standard for recording and exchanging unambiguous metadata to enable the sustainable accessibility of government information. Organization ontology 0.8.0 Organizational structures Vocabulary for describing organizational structures, specializable to a broad variety of types of organization. Web Ontology Language (OWL) 2.0.0 Things, groups of things, and relations between things Language (OWL) is a Semantic Web language designed to represent rich and complex knowledge about things, groups of things, and relations between things. Person Name Vocabulary (PNV) 1.1 Persons' names The Person Name Vocabulary (PNV) is an RDF vocabulary and data model for persons' names. It is applicable to many datasets in which persons are described, as it accommodates different levels of data granularity. It furthermore allows for easy alignment of name elements, including idiosyncratic ones, such as family name prefixes and patronymics, with standard vocabularies such as Schema.org, FOAF, DBpedia and Wikidata, thus guaranteeing optimal data interoperability. PREMIS 3 Ontology 3.0.0 Digital objects Ontology for PREMIS 3, the international standard for metadata to support the preservation of digital objects and ensure their long-term usability. PROV Ontology (PROV-O) no version info Provenance information The PROV Ontology (PROV-O) expresses the PROV Data Model using the OWL2 Web Ontology Language (OWL2). It provides a set of classes, properties, and restrictions that can be used to represent and interchange provenance information generated in different systems and under different contexts. It can also be specialized to create new classes and properties to model provenance information for different applications and domains. Data Cube Vocabulary 0.2 Statistical data, multi-dimensional data sets There are many situations where it would be useful to be able to publish multi-dimensional data, such as statistics, on the web in such a way that it can be linked to related data sets and concepts. The Data Cube vocabulary provides a means to do this using the W3C RDF (Resource Description Framework) standard. The model underpinning the Data Cube vocabulary is compatible with the cube model that underlies SDMX (Statistical Data and Metadata eXchange), an ISO standard for exchanging and sharing statistical data and metadata among organizations. The Data Cube vocabulary is a core foundation which supports extension vocabularies to enable publication of other aspects of statistical data flows or other multi-dimensional data sets. Quantities, Units, Dimensions and Types (QUDT) 2.1.2 Physical quantities, units of measure, dimensions The QUDT, or \u201cQuantity, Unit, Dimension and Type\u201d schema defines the base classes properties, and restrictions used for modeling physical quantities, units of measure, and their dimensions in various measurement systems. 
RDA element sets: Agent properties 1.0.0 RDA Agent The Agent properties element set consists of properties representing attributes and relationships of the RDA Agent , Collective Agent , Person , Family , and Corporate Body entities. RDA element sets: Classes 1.0.0 Classes representing the RDA entities The Classes element set consists of classes representing the RDA entities, including RDA Entity, Work, Expression, Manifestation, Item, Agent, Collective Agent, Person, Family, Corporate Body, Nomen, Place, and Timespan. RDA Content Type 1.0.0 Content A categorization reflecting the fundamental form of communication in which the content is expressed and the human sense through which it is intended to be perceived. RDA Carrier Type 1.0.0 Carrier A categorization reflecting the format of the storage medium and housing of a carrier in combination with the type of intermediation device required to view, play, run, etc., the content of a resource. RDA Element Sets: Expression Properties 1.0.0 RDA Expression properties The Expression properties element set consists of properties representing attributes and relationships of the RDA Expression entity. RDA element sets: Item properties 5.0.12 RDA Item The Item properties element set consists of properties representing attributes and relationships of the RDA Item entity. RDA Element Sets: Manifestation Properties 1.0.0 RDA Manifestation The Manifestation properties element set consists of properties representing attributes and relationships of the RDA Manifestation entity. RDA Media Type 1.0.0 Media type A categorization reflecting the general type of intermediation device required to view, play, run, etc., the content of a resource. RDA element sets: Nomen properties 1.0.0 RDA Nomen The Nomen properties element set consists of properties representing attributes and relationships of the RDA Nomen entity. RDA element sets: Place properties 1.0.0 RDA Place The Place properties element set consists of properties representing attributes and relationships of the RDA Place entity. RDA element sets: Timespan properties 1.0.0 RDA Timespan The Expression properties element set consists of properties representing attributes and relationships of the RDA Timespan entity. RDA element sets: Unconstrained properties 1.0.0 Properties of all RDA entities The Unconstrained properties element set consists of properties representing the elements of all of the RDA entities. Each property in the element set has semantics which are independent of the LRM model and has no specified domain or range. RDA element sets: Work properties 1.0.0 RDA Work The Work properties element set consists of properties representing attributes and relationships of the RDA Work entity. RDA element sets: Entity properties 1.0.0 RDA Entity The RDA Entity properties element set consists of properties representing elements of the RDA Entity entity. Resource Description Framework (RDF) 1.1.0 RDF This is the RDF Schema for the RDF vocabulary terms in the RDF Namespace, defined in RDF Concepts. RDF Schema 1.1.0 Data-modelling vocabulary for RDF data RDF Schema provides a data-modelling vocabulary for RDF data. RDF Schema is an extension of the basic RDF vocabulary. MARC Code List for Relators Scheme 2017-09-07 Relator terms Relator terms and their associated codes designate the relationship between a name and a bibliographic resource. The relator codes are three-character lowercase alphabetic strings that serve as identifiers. Either the term or the code may be used as controlled values. 
Records in Contexts Ontology (ICA RiC-O) 0.2 Archives RiC-O (Records in Contexts-Ontology) is an OWL ontology for describing archival record resources. As the second part of Records in Contexts standard, it is a formal representation of Records in Contexts Conceptual Model (RiC-CM). Reconstructions and Observations in Archival Resources (ROAR) 0.1 Archives Ontology to describe person, location etc. observations in archival resources. One or multiple observations can be bundled into a reconstruction that combines complementary (or sometimes conflicting) information from the observation(s) so that a single entity is reconstructed out of several entity observations from one or multiple sources. Schema.org 22.0 Collection of shared vocabularies The Schema.org vocabulary, including the core vocabulary and all domain-specific layers. Shapes Constraint Language (SHACL) 1.0.0 Validation of RDF graphs SHACL Shapes Constraint Language is a language for validating RDF graphs against a set of conditions. These conditions are provided as shapes and other constructs expressed in the form of an RDF graph. RDF graphs that are used in this manner are called \u201cshapes graphs\u201d in SHACL and the RDF graphs that are validated against a shapes graph are called \u201cdata graphs\u201d. As SHACL shape graphs are used to validate that data graphs satisfy a set of conditions they can also be viewed as a description of the data graphs that do satisfy these conditions. Such descriptions may be used for a variety of purposes beside validation, including user interface building, code generation and data integration. Simple Knowledge Organization System (SKOS) 1.2.0 Knowledge organization systems The Simple Knowledge Organization System (SKOS) is a common data model for sharing and linking knowledge organization systems via the Semantic Web. Simple Knowledge Organization System eXtension for Labels 1.4.0 Labels SKOS-XL defines an extension for the Simple Knowledge Organization System, providing additional support for describing and linking lexical entities. SPARQL Service Description 1.1 SPARQL SPARQL Service Description Time Ontology 1.0.0 Temporal properties OWL-Time is an OWL-2 DL ontology of temporal concepts, for describing the temporal properties of resources in the world or described in Web pages. The ontology provides a vocabulary for expressing facts about topological (ordering) relations among instants and intervals, together with information about durations, and about temporal position including date-time information. Time positions and durations may be expressed using either the conventional (Gregorian) calendar and clock, or using another temporal reference system such as Unix-time, geologic time, or different calendars. All Units Ontology 2.1.2 Units of measure Standard units of measure for all units. Vocabulary for Annotating Vocabulary Descriptions (VANN) 1.0.0 Annotation A vocabulary for annotating descriptions of vocabularies with examples and usage notes. Vocabulary of Interlinked Datasets (VoID) 1.0.0 Metadata about RDF datasets VoID is an RDF Schema vocabulary for expressing metadata about RDF datasets. It is intended as a bridge between the publishers and users of RDF data, with applications ranging from data discovery to cataloging and archiving of datasets. WGS84 Geo Positioning 1.22.0 Latitude, longitude and altitude A vocabulary for representing latitude, longitude and altitude information in the WGS84 geodetic reference datum. 
WGS stands for the World Geodetic Survey.","title":"Vocabularies"},{"location":"triply-etl/generic/vocabularies/#supported-vocabularies","text":"TriplyETL includes out-of-the-box support for a large number of external vocabularies, enumerated in the table below. If you miss an external vocabulary in that table, then let us know via: support@triply.cc With the latest update, TriplyETL vocabularies are now represented as Vocabulary objects, replacing the previous usage of objects with the type IRI . This change may necessitate adjustments to existing ETLs that utilize static vocabularies, such as aat . In this case, the vocabulary would need to be updated to aat.toIri() to ensure compatibility with the correct type. See the external vocabularies section for more information on how to use external vocabularies in ETL configuration. The following table lists the currently supported vocabularies: Name Version Use cases Description Argument Model Ontology (AMO) 1.0 Fake news detection, argumentation structure An ontology for describing argumentation according to Toulmin's argumentation model. Bibliographic Ontology Specification (BIBO) no version info Libraries, citation graphs, bibliography The Bibliographic Ontology Specification provides main concepts and properties for describing citations and bibliographic references (i.e. quotes, books, articles, etc) on the Semantic Web. Building Topology Ontology (BOT) 0.3.2 Buildings The Building Topology Ontology (BOT) is a minimal ontology for describing the core topological concepts of a building. Brick: A uniform metadata schema for buildings no version info Buildings Brick is an open-source effort to standardize semantic descriptions of the physical, logical and virtual assets in buildings and the relationships between them. Cultural Heritage Ontology (CEO) 1.41 Cultural heritage The CEO is the complete semantic representation of the logical data models CHO and KENNIS from the data layer of the RCE. Conceptual Reference Model (CRM) 7.1.2 Cultural heritage The CIDOC Conceptual Reference Model (CRM) provides definitions and a formal structure for describing the implicit and explicit concepts and relationships used in cultural heritage documentation. Conceptual Reference Model (CRM) - Digital no version info Digitization products An ontology and RDF Schema to encode metadata about the steps and methods of production (\u201cprovenance\u201d) of digitization products and synthetic digital representations such as 2D, 3D or even animated Models created by various technologies. Its distinct features compared to competitive models is the complete inclusion of the initial physical measurement processes and their parameters. Conceptual Reference Model (CRM) - PC no version info Cultural heritage CIDOC CRM v7.1.2 module for the implementation of properties of properties in RDFs. DBpedia Ontology 1.0.0 DBpedia Ontology for DBpedia Data Catalog Vocabulary (DCAT) 2.0.0 Data catalogs, datasets DCAT is an RDF vocabulary designed to facilitate interoperability between data catalogs published on the Web. Dublin Core Type Vocabulary 2012-06-14 Classes The DCMI Type Vocabulary was created in 2001. It defines classes for basic types of thing that can be described using DCMI metadata terms. Dublin Core Terms 1.1.0 Metadata terms This document is an up-to-date specification of all metadata terms maintained by the Dublin Core Metadata Initiative, including properties, vocabulary encoding schemes, syntax encoding schemes, and classes. 
Document Elements Ontology (DEO) 2015-07-03 Rhetorical elements within documents DEO, The Discourse Elements Ontology, is an ontology written in OWL 2 DL that provides a structured vocabulary for rhetorical elements within documents (e.g. Introduction, Discussion, Acknowledgements, Reference List, Figures, Appendix), enabling these to be described in RDF. It uses some of the rhetorical block elements from the SALT Rhetorical Ontology and the Ontology of Rhetorical Blocks. Document Components Ontology (DoCo) 1.3.0 Document components The Document Components Ontology (DoCO) in an ontology that provides a structured vocabulary written of document components, both structural (e.g., block, inline, paragraph, section, chapter) and rhetorical (e.g., introduction, discussion, acknowledgements, reference list, figure, appendix). ERA Vocabulary 2022-02-02 Railway infrastructure Vocabulary defined by the European Union Agency for Railways to describe the concepts and relationships related to the European railway infrastructure and the vehicles authorized to operate over it. FRBR-aligned Bibliographic Ontology (FaBiO) no version info Publishing, bibliography, textual publications An ontology for recording and publishing on the Semantic Web descriptions of entities that are published or potentially publishable, and that contain or are referred to by bibliographic references, or entities used to define such bibliographic references. Friend of a Friend (FOAF) 0.1.0 People, information FOAF is a project devoted to linking people and information using the Web. Regardless of whether information is in people's heads, in physical or digital documents, or in the form of factual data, it can be linked. FOAF integrates three kinds of network: social networks of human collaboration, friendship and association; representational networks that describe a simplified view of a cartoon universe in factual terms, and information networks that use Web-based linking to share independently published descriptions of this inter-connected world. FOAF does not compete with socially-oriented Web sites; rather it provides an approach in which different sites can tell different parts of the larger story, and by which users can retain some control over their information in a non-proprietary format. Functional Requirements for Bibliographic Records (FRBR) 2005-08-10 Bibliography This vocabulary is an expression in RDF of the concepts and relations described in the IFLA report on the Functional Requirements for Bibliographic Records (FRBR). GeoSPARQL 1.0 Geospatial data The OGC GeoSPARQL standard supports representing and querying geospatial data on the Semantic Web. GeoSPARQL defines a vocabulary for representing geospatial data in RDF, and it defines an extension to the SPARQL query language for processing geospatial data. In addition, GeoSPARQL is designed to accommodate systems based on qualitative spatial reasoning and systems based on quantitative spatial computations. Geography Markup Language (GML) Encoding Standard 3.2.1 Geography XML grammar for expressing geographical features. GML serves as a modeling language for geographic systems as well as an open interchange format for geographic transactions on the Internet. Getty Vocabulary Program (GVP) 3.3.0 Classes, properties and values in GVP LOD The GVP Ontology defines classes, properties and values ( skos:Concept s) used in GVP LOD. 
Linked Art no version info Cultural heritage Linked Art describes cultural heritage resources, with a focus on artworks and museum-oriented activities. It defines common patterns and terms to ensure that the resulting data can be easily used and is based on real-world data and use cases. Metagegevens voor duurzaam toegankelijke overheidsinformatie (MDTO) 1.0 Government information MDTO (Metadata for sustainably accessible government information) is a standard for recording and exchanging unambiguous metadata to enable the sustainable accessibility of government information. Organization ontology 0.8.0 Organizational structures Vocabulary for describing organizational structures, specializable to a broad variety of types of organization. Web Ontology Language (OWL) 2.0.0 Things, groups of things, and relations between things Language (OWL) is a Semantic Web language designed to represent rich and complex knowledge about things, groups of things, and relations between things. Person Name Vocabulary (PNV) 1.1 Persons' names The Person Name Vocabulary (PNV) is an RDF vocabulary and data model for persons' names. It is applicable to many datasets in which persons are described, as it accommodates different levels of data granularity. It furthermore allows for easy alignment of name elements, including idiosyncratic ones, such as family name prefixes and patronymics, with standard vocabularies such as Schema.org, FOAF, DBpedia and Wikidata, thus guaranteeing optimal data interoperability. PREMIS 3 Ontology 3.0.0 Digital objects Ontology for PREMIS 3, the international standard for metadata to support the preservation of digital objects and ensure their long-term usability. PROV Ontology (PROV-O) no version info Provenance information The PROV Ontology (PROV-O) expresses the PROV Data Model using the OWL2 Web Ontology Language (OWL2). It provides a set of classes, properties, and restrictions that can be used to represent and interchange provenance information generated in different systems and under different contexts. It can also be specialized to create new classes and properties to model provenance information for different applications and domains. Data Cube Vocabulary 0.2 Statistical data, multi-dimensional data sets There are many situations where it would be useful to be able to publish multi-dimensional data, such as statistics, on the web in such a way that it can be linked to related data sets and concepts. The Data Cube vocabulary provides a means to do this using the W3C RDF (Resource Description Framework) standard. The model underpinning the Data Cube vocabulary is compatible with the cube model that underlies SDMX (Statistical Data and Metadata eXchange), an ISO standard for exchanging and sharing statistical data and metadata among organizations. The Data Cube vocabulary is a core foundation which supports extension vocabularies to enable publication of other aspects of statistical data flows or other multi-dimensional data sets. Quantities, Units, Dimensions and Types (QUDT) 2.1.2 Physical quantities, units of measure, dimensions The QUDT, or \u201cQuantity, Unit, Dimension and Type\u201d schema defines the base classes properties, and restrictions used for modeling physical quantities, units of measure, and their dimensions in various measurement systems. 
RDA element sets: Agent properties 1.0.0 RDA Agent The Agent properties element set consists of properties representing attributes and relationships of the RDA Agent , Collective Agent , Person , Family , and Corporate Body entities. RDA element sets: Classes 1.0.0 Classes representing the RDA entities The Classes element set consists of classes representing the RDA entities, including RDA Entity, Work, Expression, Manifestation, Item, Agent, Collective Agent, Person, Family, Corporate Body, Nomen, Place, and Timespan. RDA Content Type 1.0.0 Content A categorization reflecting the fundamental form of communication in which the content is expressed and the human sense through which it is intended to be perceived. RDA Carrier Type 1.0.0 Carrier A categorization reflecting the format of the storage medium and housing of a carrier in combination with the type of intermediation device required to view, play, run, etc., the content of a resource. RDA Element Sets: Expression Properties 1.0.0 RDA Expression properties The Expression properties element set consists of properties representing attributes and relationships of the RDA Expression entity. RDA element sets: Item properties 5.0.12 RDA Item The Item properties element set consists of properties representing attributes and relationships of the RDA Item entity. RDA Element Sets: Manifestation Properties 1.0.0 RDA Manifestation The Manifestation properties element set consists of properties representing attributes and relationships of the RDA Manifestation entity. RDA Media Type 1.0.0 Media type A categorization reflecting the general type of intermediation device required to view, play, run, etc., the content of a resource. RDA element sets: Nomen properties 1.0.0 RDA Nomen The Nomen properties element set consists of properties representing attributes and relationships of the RDA Nomen entity. RDA element sets: Place properties 1.0.0 RDA Place The Place properties element set consists of properties representing attributes and relationships of the RDA Place entity. RDA element sets: Timespan properties 1.0.0 RDA Timespan The Expression properties element set consists of properties representing attributes and relationships of the RDA Timespan entity. RDA element sets: Unconstrained properties 1.0.0 Properties of all RDA entities The Unconstrained properties element set consists of properties representing the elements of all of the RDA entities. Each property in the element set has semantics which are independent of the LRM model and has no specified domain or range. RDA element sets: Work properties 1.0.0 RDA Work The Work properties element set consists of properties representing attributes and relationships of the RDA Work entity. RDA element sets: Entity properties 1.0.0 RDA Entity The RDA Entity properties element set consists of properties representing elements of the RDA Entity entity. Resource Description Framework (RDF) 1.1.0 RDF This is the RDF Schema for the RDF vocabulary terms in the RDF Namespace, defined in RDF Concepts. RDF Schema 1.1.0 Data-modelling vocabulary for RDF data RDF Schema provides a data-modelling vocabulary for RDF data. RDF Schema is an extension of the basic RDF vocabulary. MARC Code List for Relators Scheme 2017-09-07 Relator terms Relator terms and their associated codes designate the relationship between a name and a bibliographic resource. The relator codes are three-character lowercase alphabetic strings that serve as identifiers. Either the term or the code may be used as controlled values. 
Records in Contexts Ontology (ICA RiC-O) 0.2 Archives RiC-O (Records in Contexts-Ontology) is an OWL ontology for describing archival record resources. As the second part of Records in Contexts standard, it is a formal representation of Records in Contexts Conceptual Model (RiC-CM). Reconstructions and Observations in Archival Resources (ROAR) 0.1 Archives Ontology to describe person, location etc. observations in archival resources. One or multiple observations can be bundled into a reconstruction that combines complementary (or sometimes conflicting) information from the observation(s) so that a single entity is reconstructed out of several entity observations from one or multiple sources. Schema.org 22.0 Collection of shared vocabularies The Schema.org vocabulary, including the core vocabulary and all domain-specific layers. Shapes Constraint Language (SHACL) 1.0.0 Validation of RDF graphs SHACL Shapes Constraint Language is a language for validating RDF graphs against a set of conditions. These conditions are provided as shapes and other constructs expressed in the form of an RDF graph. RDF graphs that are used in this manner are called \u201cshapes graphs\u201d in SHACL and the RDF graphs that are validated against a shapes graph are called \u201cdata graphs\u201d. As SHACL shape graphs are used to validate that data graphs satisfy a set of conditions they can also be viewed as a description of the data graphs that do satisfy these conditions. Such descriptions may be used for a variety of purposes beside validation, including user interface building, code generation and data integration. Simple Knowledge Organization System (SKOS) 1.2.0 Knowledge organization systems The Simple Knowledge Organization System (SKOS) is a common data model for sharing and linking knowledge organization systems via the Semantic Web. Simple Knowledge Organization System eXtension for Labels 1.4.0 Labels SKOS-XL defines an extension for the Simple Knowledge Organization System, providing additional support for describing and linking lexical entities. SPARQL Service Description 1.1 SPARQL SPARQL Service Description Time Ontology 1.0.0 Temporal properties OWL-Time is an OWL-2 DL ontology of temporal concepts, for describing the temporal properties of resources in the world or described in Web pages. The ontology provides a vocabulary for expressing facts about topological (ordering) relations among instants and intervals, together with information about durations, and about temporal position including date-time information. Time positions and durations may be expressed using either the conventional (Gregorian) calendar and clock, or using another temporal reference system such as Unix-time, geologic time, or different calendars. All Units Ontology 2.1.2 Units of measure Standard units of measure for all units. Vocabulary for Annotating Vocabulary Descriptions (VANN) 1.0.0 Annotation A vocabulary for annotating descriptions of vocabularies with examples and usage notes. Vocabulary of Interlinked Datasets (VoID) 1.0.0 Metadata about RDF datasets VoID is an RDF Schema vocabulary for expressing metadata about RDF datasets. It is intended as a bridge between the publishers and users of RDF data, with applications ranging from data discovery to cataloging and archiving of datasets. WGS84 Geo Positioning 1.22.0 Latitude, longitude and altitude A vocabulary for representing latitude, longitude and altitude information in the WGS84 geodetic reference datum. 
WGS stands for the World Geodetic System.","title":"Supported vocabularies"},{"location":"triply-etl/publish/","text":"On this page: Publish Destinations Remote data destinations Publishing datasets to the NDE Dataset Register Local data destinations Static and Dynamic destinations Configuring multiple TriplyDB instances Direct copying from source to destination Using TriplyDB.js in TriplyETL Upload prefix declarations Publish \u00b6 The Publish step makes the linked data that is produced by the TriplyETL pipeline available in a Triple Store for use by others. graph LR sources -- 1. Extract --> record record -- 2. Transform --> record record -- 3. Assert --> ld ld -- 4. Enrich --> ld ld -- 5. Validate --> ld ld -- 6. Publish --> destinations linkStyle 5 stroke:red,stroke-width:3px; destinations[(\"D. Destinations\\n(TriplyDB)\")] ld[C. Internal Store] record[B. Record] sources[A. Data Sources] Destinations \u00b6 Linked data that is produced by a TriplyETL pipeline can be published to one or more destinations. Remote data destinations \u00b6 Destinations are usually online locations in TriplyDB where the output of your pipeline will be published. If no account name is given, pipeline output is uploaded under the user account tied to the currently used API Token. To upload the output to TriplyDB you can use the toTriplyDb() function, as the snippet below shows. toTriplyDb({dataset: 'my-dataset'}) toTriplyDb({account: 'my-account', dataset: 'my-dataset'}) toTriplyDb({account: 'my-account', dataset: 'my-dataset', opts:{ overwrite: true }}) In previous versions of TriplyETL, this was done with the toRdf() function as shown below: toRdf(Destination.TriplyDb.rdf('my-account', 'my-dataset', {triplyDb: etl.triplyDb})) It is still possible to upload to TriplyDB using toRdf() , but the new toTriplyDb() function represents a simplified version of this. The following options can be specified to configure the destination behavior: mergeGraphs Whether the results of the new graph should be added to the old graph without overwriting it. The default value is false . overwrite Whether the graphs that are being uploaded by TriplyETL should replace any existing graphs with the same name in the dataset. Graphs appearing in the dataset with a different name than those uploaded by TriplyETL are kept. The default value is false . synchronizeServices Whether one or more active services should be automatically synchronized once new data is uploaded. The default value is false . This value can be changed to `true` to automatically synchronize all services. Alternatively, this value can be set to a string that names a specific service, to automatically synchronize only that service. triplyDb A configuration object describing a TriplyDB instance that is different from the one associated with the current API Token. (See the section on configuring multiple TriplyDB instances for more information.) truncateGraphs Whether to delete all graphs in the dataset before uploading any graphs from TriplyETL. Notice that this will also remove graphs that will not be re-uploaded by TriplyETL. The default value is false .
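These options are passed in the opts object of toTriplyDb(). The following is a minimal sketch, assuming that synchronizeServices and the other options listed above are accepted inside opts in the same way as the overwrite option shown earlier; the account and dataset names are placeholders.

toTriplyDb({
  account: 'my-account',
  dataset: 'my-dataset',
  opts: {
    // Replace graphs with the same name, but keep graphs with other names.
    overwrite: true,
    // Synchronize all active services after the upload completes.
    synchronizeServices: true,
  },
}),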
Example: The following code snippet publishes linked data to a TriplyDB dataset called 'my-dataset' and synchronizes only the 'acceptance' service for that dataset: toRdf(Destination.TriplyDb.rdf('my-dataset', {synchronizeServices: 'acceptance'})), Publishing datasets to the NDE Dataset Register \u00b6 If you want to publish a dataset to the NDE Dataset Register, you can do so by adding the {submitToNDEDatasetRegister: true} option to the toTriplyDb() middleware. toTriplyDb({dataset: 'nde', opts: {submitToNDEDatasetRegister: true}}) Local data destinations \u00b6 TriplyETL supports publishing RDF output into a local file. This is not often used, because files lack many of the features that TriplyDB destinations support, such as: The ability to browse the data. The ability to query the data. The ability to configure metadata. The ability to configure prefix declarations. Still, there may be cases in which a local file destination is useful, for example when you do not have an active Internet connection: toRdf(Destination.file('my-file.trig')), Static and Dynamic destinations \u00b6 Destinations can be defined as static objects, meaning that you can define the destination beforehand. But it might be the case that you want to have multiple destinations for different records. In this case, you would need a dynamic destination, which should change based on certain information inside your source data. You can set static and dynamic destinations: const etl = new Etl({ sources: { someSource: Source.file('source.trig'), }, destinations: { someStaticDestination: Destination.file('static.ttl'), someDynamicDestination: context => Destination.file(context.getString('destination')), }, }) Configuring multiple TriplyDB instances \u00b6 It is possible to use multiple TriplyDB instances in one TriplyETL pipeline. The following example illustrates how the data model is used from the production instance of TriplyDB. const etl = new Etl({ sources: { data_model: Source.TriplyDb.rdf( 'my-account', 'my-dataset', { triplyDb: { token: process.env['PRODUCTION_INSTANCE_TOKEN'], url: 'https://api.production.example.com' } } ), instance_data: Source.TriplyDb.rdf( 'my-account', 'my-dataset', { triplyDb: { token: process.env['ACCEPTANCE_INSTANCE_TOKEN'], url: 'https://api.acceptance.example.com' } } ), }, }) Direct copying from source to destination \u00b6 TriplyETL supports copying sources directly to destination locations. This function is useful when you already have linked data that is used as a source, but is also needed at the destination. An example would be the information model. This would be available as a source, and with the copy function it can be uploaded to TriplyDB via TriplyETL. The following example shows the copy function: await etl.copySource( Source.file(`${source_location}`), Destination.TriplyDb.rdf(`${destination_name}`) ) The copySource() function expects the source data to be linked data. Copying a source that is not linked data can result in errors. Please note that the copySource function is not considered part of the middleware layer but is a specialized function used for direct source-to-destination copying. As a result, it won't be counted in the middleware runtime. Using TriplyDB.js in TriplyETL \u00b6 All operations that can be performed in a TriplyDB instance can be automated with classes and methods in the TriplyDB.js library. This library is also used by TriplyETL in the background to implement many of the TriplyETL functionalities.
Sometimes it is useful to use classes and methods in TriplyDB.js directly. This is done in the following way: // Create the ETL context. const etl = new Etl() // Use the context to access the TriplyDB.js connection. console.log((await etl.triplyDb.getInfo()).name) The above example prints the name of the TriplyDB instance. But any other TriplyDB.js operations can be performed. For example, the user of the current API Token can change their avatar image in TriplyDB: const user = await etl.triplyDb.getUser() await user.setAvatar('my-avatar.png') Upload prefix declarations \u00b6 At the end of a TriplyETL script, it is common to upload the prefix declarations that are configured for that pipeline. This is often done directly before or after graphs are uploaded (function toTriplyDb() ): import { toTriplyDb, uploadPrefixes } from '@triplyetl/etl/generic' const prefix = { // Your prefix declarations. } export default async function(): Promise { const etl = new Etl({ prefixes: prefix }) etl.run( // You ETL pipeline. toTriplyDb({ account: 'my-account', dataset: 'my-dataset' }), uploadPrefixes({ account: 'my-account', dataset: 'my-dataset' }), ) return etl }","title":"Publish"},{"location":"triply-etl/publish/#publish","text":"The Publish step makes the linked data that is produced by the TriplyETL pipeline available in a Triple Store for use by others. graph LR sources -- 1. Extract --> record record -- 2. Transform --> record record -- 3. Assert --> ld ld -- 4. Enrich --> ld ld -- 5. Validate --> ld ld -- 6. Publish --> destinations linkStyle 5 stroke:red,stroke-width:3px; destinations[(\"D. Destinations\\n(TriplyDB)\")] ld[C. Internal Store] record[B. Record] sources[A. Data Sources]","title":"Publish"},{"location":"triply-etl/publish/#destinations","text":"Linked data that is produced by a TriplyETL pipeline can be published to one or more destinations.","title":"Destinations"},{"location":"triply-etl/publish/#remote-data-destinations","text":"Destinations are usually online locations in TriplyDB where the output of your pipeline will be published. If no account name is given, pipeline output is uploaded under the user account tied to the currently used API Token. To upload the output to TriplyDB you can use the toTriplyDb() function, as the snippet below shows. toTriplyDb({dataset: 'my-dataset'}) toTriplyDb({account: 'my-account', dataset: 'my-dataset'}) toTriplyDb({account: 'my-account', dataset: 'my-dataset', opts:{ overwrite: true }}) In the previous versions of TriplyETL, this was done with the toRdf() function as shown below: toRdf(Destination.TriplyDb.rdf('my-account', 'my-dataset', {triplyDb: etl.triplyDb})) It is still possible to upload to TriplyDB using toRdf() , but the new toTriplyDb() function represents a simplified version of this. The following options can be specified to configure the destination behavior: mergeGraphs Whether the results of the new graph should be added to the old graph without overwriting it. The default value is false . overwrite Whether the graphs that are being uploaded by TriplyETL should replace any existing graphs with the same name in the dataset. Graphs appearing in the dataset with a different name than those uploaded by TriplyETL are kept. The default value is false . synchronizeServices Whether or more active services should be automatically synchronized once new data is uploaded. The default value is false . This value can be changed to `true`, to automatically synchronize all services. 
Alternatively, this value can be set to a string that names a specific services, to automatically synchronize only that specific service. triplyDb A configuration object describing a TriplyDB instance that is different from the one associated with the current API Token. (See the section on configuring multiple TriplyDB instance for more information.) truncateGraphs Whether to delete all graphs in the dataset before uploading any graphs from TriplyETL. Notice that this will also remove graphs that will not be re-uploaded by TriplyETL. The default value is false . Example: The following code snippet publishes linked data to a TriplyDB dataset called 'my-dataset' and synchronizes only the 'acceptance' service for that dataset: toRdf(Destination.TriplyDb.rdf('my-dataset', {synchronizeServices: 'acceptance'})),","title":"Remote data destinations"},{"location":"triply-etl/publish/#publishing-datasets-to-the-nde-dataset-register","text":"If you wat to publish a dataset to the NDE Dataset Register, you can do it by adding the {submitToNDEDatasetRegister: true} option to toTriplyDB() middleware. toTriplyDb({dataset: 'nde', opts: {submitToNDEDatasetRegister: true}})","title":"Publishing datasets to the NDE Dataset Register"},{"location":"triply-etl/publish/#local-data-destinations","text":"TriplyETL supports publishing RDF output into a local file. This is not often used, because files lack many of the features that TriplyDB destinations support, such as: The ability to browse the data. The ability to query the data. The ability to configure metadata. The ability to configure prefix declarations. Still, there may be cases in which a local file destination is useful, for example when you do not have an active Internet connection: toRdf(Destination.file('my-file.trig')),","title":"Local data destinations"},{"location":"triply-etl/publish/#static-and-dynamic-destinations","text":"Destinations can be defined as static objects meaning that you can define destination beforehand. But it might be the case that you want to have multiple destinations for different records. In this case, you would need a dynamic destination, which should change based on certain information inside your source data. You can set static and dynamic destinations: const etl = new Etl({ sources: { someSource: Source.file('source.trig'), }, destinations: { someStaticDestination: Destination.file('static.ttl'), someDynamicDestination: context => Destination.file(context.getString('destination')), }, })","title":"Static and Dynamic destinations"},{"location":"triply-etl/publish/#configuring-multiple-triplydb-instances","text":"It is possible to use multiple TriplyDB instances in one TriplyETL pipeline. The following example illustrates how the data model is used from the production instance of TriplyDB. const etl = new Etl({ sources: { data_model: Source.TriplyDb.rdf( 'my-account', 'my-dataset', { triplyDb: { token: process.env['PRODUCTION_INSTANCE_TOKEN'], url: 'https://api.production.example.com' } } ), instance_data: Source.TriplyDb.rdf( 'my-account', 'my-dataset', { triplyDb: { token: process.env['ACCEPTANCE_INSTANCE_TOKEN'], url: 'https://api.acceptance.example.com' } } ), }, })","title":"Configuring multiple TriplyDB instances"},{"location":"triply-etl/publish/#direct-copying-from-source-to-destination","text":"TriplyETL supports copying sources directly to destination locations. This function is useful when you already have linked data that is used as a source, but is also needed at the destination. 
An example would be the information model. This would be available as a source, and with the copy function it can be uploaded to TriplyDB via TriplyETL. The following example shows the copy function: await etl.copySource( Source.file(`${source_location}`), Destination.TriplyDb.rdf(`${destination_name}`) ) This function expects the source data to be linked data. Copying a source that is not linked data can result in errors. Please note that the copySource function is not considered part of the middleware layer but is a specialized function used for direct source-to-destination copying. As a result, it won't be counted in the middleware runtime.","title":"Direct copying from source to destination"},{"location":"triply-etl/publish/#using-triplydbjs-in-triplyetl","text":"All operations that can be performed in a TriplyDB instance can be automated with classes and methods in the TriplyDB.js library. This library is also used by TriplyETL in the background to implement many of the TriplyETL functionalities. Sometimes it is useful to use classes and methods in TriplyDB.js directly. This is done in the following way: // Create the ETL context. const etl = new Etl() // Use the context to access the TriplyDB.js connection. console.log((await etl.triplyDb.getInfo()).name) The above example prints the name of the TriplyDB instance. But any other TriplyDB.js operations can be performed. For example, the user of the current API Token can change their avatar image in TriplyDB: const user = await etl.triplyDb.getUser() await user.setAvatar('my-avatar.png')","title":"Using TriplyDB.js in TriplyETL"},{"location":"triply-etl/publish/#upload-prefix-declarations","text":"At the end of a TriplyETL script, it is common to upload the prefix declarations that are configured for that pipeline. This is often done directly before or after graphs are uploaded (function toTriplyDb() ): import { toTriplyDb, uploadPrefixes } from '@triplyetl/etl/generic' const prefix = { // Your prefix declarations. } export default async function(): Promise<Etl> { const etl = new Etl({ prefixes: prefix }) etl.run( // Your ETL pipeline. toTriplyDb({ account: 'my-account', dataset: 'my-dataset' }), uploadPrefixes({ account: 'my-account', dataset: 'my-dataset' }), ) return etl }","title":"Upload prefix declarations"},{"location":"triply-etl/sources/","text":"On this page: Sources Sources \u00b6 TriplyETL Sources are locations that hold data that can be extracted with one or more TriplyETL extractors . graph LR sources -- 1. Extract --> record record -- 2. Transform --> record record -- 3. Assert --> ld ld -- 4. Enrich --> ld ld -- 5. Validate --> ld ld -- 6. Publish --> destinations style sources fill:#f9f,stroke:#333,stroke-width:4px destinations[(\"D. Destinations\\n(TriplyDB)\")] ld[C. Internal Store] record[B. Record] sources[A.
Data Sources] The following kinds of sources are supported: APIs Inline JSON Inline strings Local files Online files TriplyDB Assets TriplyDB Datasets TriplyDB Queries","title":"Overview"},{"location":"triply-etl/sources/#sources","text":"TriplyETL Sources are locations that hold data that can be extracted with one or more TriplyETL extractors . graph LR sources -- 1. Extract --> record record -- 2. Transform --> record record -- 3. Assert --> ld ld -- 4. Enrich --> ld ld -- 5. Validate --> ld ld -- 6. Publish --> destinations style sources fill:#f9f,stroke:#333,stroke-width:4px destinations[(\"D. Destinations\\n(TriplyDB)\")] ld[C. Internal Store] record[B. Record] sources[A. Data Sources] The following kinds of sources are supported: APIs Inline JSON Inline strings Local files Online files TriplyDB Assets TriplyDB Datasets TriplyDB Queries","title":"Sources"},{"location":"triply-etl/sources/apis/","text":"On this page: APIs Raw SPARQL endpoints Use in production systems APIs \u00b6 The URL source type can also be used to extract records from online endpoints and APIs. The following code snippet extracts records from a TriplyDB RESTful API: fromJson(Source.url('https://api.triplydb.com/datasets')), Raw SPARQL endpoints \u00b6 SPARQL endpoints are online APIs. The following code snippet issues a raw SPARQL query against a public SPARQL endpoint. Since we specified CSV as the result set format (Media Type text/csv ), the result set can be accessed as any other CSV source: fromCsv( Source.url( 'https://dbpedia.org/sparql', { request: { headers: { accept: 'text/csv', 'content-type': 'application/sparql-query', }, body: 'select * { ?s ?p ?o. } limit 1', method: 'POST', }, } ) ) Use in production systems \u00b6 Raw SPARQL endpoints lack several features that are essential for use in production systems: - secure access control - pagination - reliable retrieval of large result sets - API variables - versioning These features are all supported by TriplyDB queries . It is therefore simpler and safer to use TriplyDB queries. Still, outside of production systems, raw SPARQL endpoints can be used as regular web APIs.","title":"APIs"},{"location":"triply-etl/sources/apis/#apis","text":"The URL source type can also be used to extract records from online endpoints and APIs. The following code snippet extracts records from a TriplyDB RESTful API: fromJson(Source.url('https://api.triplydb.com/datasets')),","title":"APIs"},{"location":"triply-etl/sources/apis/#raw-sparql-endpoints","text":"SPARQL endpoints are online APIs. The following code snippet issues a raw SPARQL query against a public SPARQL endpoint. Since we specified CSV as the result set format (Media Type text/csv ), the result set can be accessed as any other CSV source: fromCsv( Source.url( 'https://dbpedia.org/sparql', { request: { headers: { accept: 'text/csv', 'content-type': 'application/sparql-query', }, body: 'select * { ?s ?p ?o. } limit 1', method: 'POST', }, } ) )","title":"Raw SPARQL endpoints"},{"location":"triply-etl/sources/apis/#use-in-production-systems","text":"Raw SPARQL endpoints lack several features that are essential for use in production systems: - secure access control - pagination - reliable retrieval of large result sets - API variables - versioning These features are all supported by TriplyDB queries . It is therefore simpler and safer to use TriplyDB queries. Still, outside of production systems, raw SPARQL endpoints can be used as regular web APIs.","title":"Use in production systems"},{"location":"triply-etl/sources/inline-json/","text":"On this page: Inline JSON Inline JSON \u00b6 Because TriplyETL configurations are implemented in TypeScript, it is possible to specify JSON data inline with TypeScript Objects. JSON is the only data format that can be specified in such a native inline way in TriplyETL.
The following code snippet specifies two records using inline TypeScript objects: fromJson([ { id: '123', name: 'John' }, { id: '456', name: 'Jane' }, ]), This results in the following two records: { \"id\": \"123\", \"name\": \"John\" } { \"id\": \"456\", \"name\": \"Jane\" } In documentation, we often use such inline JSON sources since that makes code snippets self-contained, without having to rely on external sources such as files. In production systems this native inline source type is almost never used.","title":"Inline JSON"},{"location":"triply-etl/sources/inline-json/#inline-json","text":"Because TriplyETL configurations are implemented in TypeScript, it is possible to specify JSON data inline with TypeScript Objects. JSON is the only data format that be specified in such a native inline way in TriplyETL. The following code snippet specifies two records using inline TypeScript objects: fromJson([ { id: '123', name: 'John' }, { id: '456', name: 'Jane' }, ]), This results in the following two records: { \"id\": \"123\", \"name\": \"John\" } { \"id\": \"456\", \"name\": \"Jane\" } In documentation, we often use such inline JSON sources since that makes code snippets self-contained, without having to rely on external sources such as files. In production systems this native inline source type is almost never used.","title":"Inline JSON"},{"location":"triply-etl/sources/inline-strings/","text":"On this page: Inline strings Inline strings \u00b6 Data in the JSON or RDF formats can be specified with inline strings. The following code snippet loads triples into the Internal Store: loadRdf( Source.string(` prefix person: prefix sdo: person:1 a sdo:Person; sdo:name 'J. Doe'.`), { contentType: 'text/turtle' } ), This loads the following triples: graph LR person:1 -- a --> sdo:Person person:1 -- sdo:name --> J.Doe Notice that we must specify the RDF serialization format that we use. This is necessary because loadRdf() supports a large number of formats, some of which are difficult to autodetect. The following formats are supported: Format contentType value HTML 'text/html' JSON-LD 'application/ld+json' JSON 'application/json' N-Quads 'application/n-quads' N-Triples 'application/n-triples' N3 'text/n3' RDF/XML 'application/rdf+xml' SVG 'image/svg+xml' TriG 'application/trig' Turtle 'text/turtle' XHTML 'application/xhtml+xml' XML 'application/xml' The following example makes RDF source data available to the SHACL validate() function: import { Source } from '@triplyetl/etl/generic' import { validate } from '@triplyetl/etl/shacl' validate(Source.string(` prefix sh: prefix shp: prefix sdo: shp:Person a sh:NodeShape; sh:property shp:Person_name; sh:targetClass sdo:Person. shp:Person_name a sh:PropertyShape; sh:datatype xsd:string; sh:minLength 1; sh:path sdo:name.`)) This makes the following linked data SHACL specification available: graph LR shp:Person -- a --> sh:NodeShape shp:Person -- sh:property --> shp:Person_name shp:Person -- sh:targetClass --> sdo:Person shp:Person_name -- a --> sh:PropertyShape shp:Person_name -- sh:datatype --> xsd:string shp:Person_name -- sh:minLength --> 1 shp:Person_name -- sh:path --> sdo:name Notice that validate() does not require us to set the content-type, since it only supports N-Quads, N-Triples, TriG and Turtle (and these formats can be detected automatically). 
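For example, based on the contentType table above, inline JSON-LD could be loaded in a similar way; this is a minimal sketch in which the IRI and property are illustrative placeholders:
loadRdf(
  Source.string(`{
    "@id": "https://example.com/id/person/1",
    "https://schema.org/name": "J. Doe"
  }`),
  // JSON-LD cannot be reliably autodetected, so the media type is given explicitly.
  { contentType: 'application/ld+json' }
),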
The following example makes a string source available to the fromJson() source extractor: fromJson(Source.string(` [ { id: '123', name: 'John' }, { id: '456', name: 'Jane' } ]`)), Notice that the inline JSON source is often a more intuitive specification format for the fromJson() source extractor than its corresponding string source. While inline JSON and string sources are mostly used for small examples, local files are somewhat more widely used.","title":"Inline Strings"},{"location":"triply-etl/sources/inline-strings/#inline-strings","text":"Data in the JSON or RDF formats can be specified with inline strings. The following code snippet loads triples into the Internal Store: loadRdf( Source.string(` prefix person: prefix sdo: person:1 a sdo:Person; sdo:name 'J. Doe'.`), { contentType: 'text/turtle' } ), This loads the following triples: graph LR person:1 -- a --> sdo:Person person:1 -- sdo:name --> J.Doe Notice that we must specify the RDF serialization format that we use. This is necessary because loadRdf() supports a large number of formats, some of which are difficult to autodetect. The following formats are supported: Format contentType value HTML 'text/html' JSON-LD 'application/ld+json' JSON 'application/json' N-Quads 'application/n-quads' N-Triples 'application/n-triples' N3 'text/n3' RDF/XML 'application/rdf+xml' SVG 'image/svg+xml' TriG 'application/trig' Turtle 'text/turtle' XHTML 'application/xhtml+xml' XML 'application/xml' The following example makes RDF source data available to the SHACL validate() function: import { Source } from '@triplyetl/etl/generic' import { validate } from '@triplyetl/etl/shacl' validate(Source.string(` prefix sh: prefix shp: prefix sdo: shp:Person a sh:NodeShape; sh:property shp:Person_name; sh:targetClass sdo:Person. shp:Person_name a sh:PropertyShape; sh:datatype xsd:string; sh:minLength 1; sh:path sdo:name.`)) This makes the following linked data SHACL specification available: graph LR shp:Person -- a --> sh:NodeShape shp:Person -- sh:property --> shp:Person_name shp:Person -- sh:targetClass --> sdo:Person shp:Person_name -- a --> sh:PropertyShape shp:Person_name -- sh:datatype --> xsd:string shp:Person_name -- sh:minLength --> 1 shp:Person_name -- sh:path --> sdo:name Notice that validate() does not require us to set the content-type, since it only supports N-Quads, N-Triples, TriG and Turtle (and these formats can be detected automatically). The following example makes a string source available to the fromJson() source extractor: fromJson(Source.string(` [ { id: '123', name: 'John' }, { id: '456', name: 'Jane' } ]`)), Notice that the inline JSON source is often a more intuitive specification format for the fromJson() source extractor than its corresponding string source. While inline JSON and string sources are mostly used for small examples, local files are somewhat more widely used.","title":"Inline strings"},{"location":"triply-etl/sources/local-files/","text":"On this page: Local files Basic usage Multiple local files Production systems Local files \u00b6 Local files are files that are on the same computer that the TriplyETL pipeline runs on. TriplyETL supports local files with the Source.file() function. Basic usage \u00b6 Local files are declared by using the Source object, which is imported in the following way: import { Source } from '@triplyetl/etl/generic' The following code snippet uses a local JSON file. 
The local file is used by the fromJson() extractor : fromJson(Source.file('./static/example.json')), Multiple local files \u00b6 It is possible to specify one or more local files, by using array notation. The following code snippet extracts records from a large number of local JSON files: fromJson(Source.file([ './static/data-001.json', './static/data-002.json', ..., './static/data-999.json', ])), Production systems \u00b6 Local files are not typically used in production systems. The reason for this is that it is difficult to guarantee that all project partners have exactly the same local files on their respective computers. The risk of using outdated files, and the overhead of securely sharing files with multiple team members, are often sufficient reason to use TriplyDB Assets instead.","title":"Local Files"},{"location":"triply-etl/sources/local-files/#local-files","text":"Local files are files that are on the same computer that the TriplyETL pipeline runs on. TriplyETL supports local files with the Source.file() function.","title":"Local files"},{"location":"triply-etl/sources/local-files/#basic-usage","text":"Local files are declared by using the Source object, which is imported in the following way: import { Source } from '@triplyetl/etl/generic' The following code snippet uses a local JSON file. The local file is used by the fromJson() extractor : fromJson(Source.file('./static/example.json')),","title":"Basic usage"},{"location":"triply-etl/sources/local-files/#multiple-local-files","text":"It is possible to specify one or more local files, by using array notation. The following code snippet extracts records from a large number of local JSON files: fromJson(Source.file([ './static/data-001.json', './static/data-002.json', ..., './static/data-999.json', ])),","title":"Multiple local files"},{"location":"triply-etl/sources/local-files/#production-systems","text":"Local files are not typically used in production systems. The reason for this is that it is difficult to guarantee that all project partners have exactly the same local files on their respective computers. The risk of using outdated files, and the overhead of securely sharing files with multiple team members, are often sufficient reason to use TriplyDB Assets instead.","title":"Production systems"},{"location":"triply-etl/sources/online-files/","text":"On this page: Online files Basic usage Authorization Other HTTP options Use in production systems Online files \u00b6 Online files are files that are published on some (public or private) server. TriplyETL supports online files with the Source.url() function. Basic usage \u00b6 Online files are declared by using the Source object, which is imported in the following way: import { Source } from '@triplyetl/etl/generic' The following code snippet uses a public online file. The file is used by the fromJson() extractor: fromJson(Source.url('https://somewhere.com/example.json')), Authorization \u00b6 It is possible to access online files that are not publicly available. In such cases, the HTTP Authorization header must be specified. The following code snippet uses the options object of Source.url() to specify the authorization header that is necessary to access the specified online file: fromJson( Source.url( 'https://somewhere.com/example.json', { request: { headers: { Authorization: `Basic ${Buffer.from(`${username}:${password}`).toString('base64')}` } } } ) ), Other HTTP options \u00b6 TriplyETL uses the node-fetch library to implement Source.url() .
This means that all options supported by that library are also supported for online files. For example, the following code snippet specifies the media type that is requested from an online location. Specifically, it requests the Turtle representation of the Amsterdam resource from DBpedia: loadRdf( Source.url( 'https://dbpedia.org/resource/Amsterdam', { request: { headers: { Accept: 'text/turtle' } } } ) ), Use in production systems \u00b6 Online files are typically not used in production pipelines, because the availability of many Internet resources is outside of the control of the project team. Internet resources that are not maintained by team members may be subject to content-wise changes, which may affect the production pipeline. If the project team controls the Internet resources, then risks are smaller. But at that point it is even better to upload the online files as TriplyDB assets for additional benefits such as access controls.","title":"Online Files"},{"location":"triply-etl/sources/online-files/#online-files","text":"Online files are files that are published on some (public or private) server. TriplyETL supports online files with the Source.url() function.","title":"Online files"},{"location":"triply-etl/sources/online-files/#basic-usage","text":"Online files are declared by using the Source object, which is imported in the following way: import { Source } from '@triplyetl/etl/generic' The following code snippet uses a public online file. The file is used by the fromJson() extractor: fromJson(Source.url('https://somewhere.com/example.json')),","title":"Basic usage"},{"location":"triply-etl/sources/online-files/#authorization","text":"It is possible to access online files that are not publicly available. In such cases, the HTTP Authorization header must be specified. The following code snippet uses the options object of Source.url() to specify the authorization header that is necessary to access the specified online file: fromJson( Source.url( 'https://somewhere.com/example.json', { request: { headers: { Authorization: `Basic ${Buffer.from(`${username}:${password}`).toString('base64')}` } } } ) ),","title":"Authorization"},{"location":"triply-etl/sources/online-files/#other-http-options","text":"TriplyETL uses the node-fetch library to implement Source.url() . This means that all options supported by that library are also supported for online files. For example, the following code snippet specifies the media type that is requested from an online location. Specifically, it requests the Turtle representation of the Amsterdam resource from DBpedia: loadRdf( Source.url( 'https://dbpedia.org/resource/Amsterdam', { request: { headers: { Accept: 'text/turtle' } } } ) ),","title":"Other HTTP options"},{"location":"triply-etl/sources/online-files/#use-in-production-systems","text":"Online files are typically not used in production pipelines, because the availability of many Internet resources is outside of the control of the project team. Internet resources that are not maintained by team members may be subject to content-wise changes, which may affect the production pipeline. If the project team controls the Internet resources, then risks are smaller.
But at that point it is even better to upload the online files as TriplyDB asset for additional benefits such as access controls.","title":"Use in production systems"},{"location":"triply-etl/sources/rml/","text":"On this page: RML Sources RML Sources \u00b6","title":"RML Sources"},{"location":"triply-etl/sources/rml/#rml-sources","text":"","title":"RML Sources"},{"location":"triply-etl/sources/triplydb-assets/","text":"On this page: TriplyDB Assets Filtering Versioning Access TriplyDB instance Compression TriplyDB Assets \u00b6 Assets are a core feature of TriplyDB. Assets allow arbitrary files to be stored in the context of a linked dataset. A typical use case for assets is to upload (new versions of) source files. The TriplyETL pipeline can pick the latest versions of these source files and publish the resulting linked data in the the same dataset. The following code snippet uses a JSON source that is stored in a TriplyDB asset: fromJson( Source.TriplyDb.asset( 'some-account', 'some-dataset', { name: 'example.json' } ) ), As with other TriplyDB sources, the account name is optional. When omitted, the user account that is associated with the current API Token is used: loadRdf( Source.TriplyDb.rdf('my-dataset', { name: 'example.json' }) ), As with other source type, multiple assets can be specified: fromCsv([ Source.TriplyDb.asset('my-dataset', { name: 'table1.csv' }), Source.TriplyDb.asset('my-dataset', { name: 'table2.csv' }), ]), Filtering \u00b6 If the asset name is omitted, all assets are returned. This is often unpractical, since only some assets must be processed. For example, if a dataset has PDF and JSON assets, only the latter should be processed by the fromJson() source extractor. For such use cases the filter option can be used instead of the name option. The filter option takes a TypeScript function that maps assets names onto Boolean values (true or false). Only the assets for which the function returns truth are included. The following snippet processes all and only assets whose name ends in .json : fromJson( Source.TriplyDb.asset( 'my-dataset', { filter: name => name.endsWith('json') } ) ), Versioning \u00b6 It is possible to upload new versions of an existing TriplyDB asset. When no specific version is specified, a TriplyETL pipeline will use the latest version automatically. In order to use a specific version, the version option can be set to a version number. The following snippet uses a specific version of an asset: fromJson( Source.TriplyDb.asset( 'some-account', 'some-dataset', { name: 'example.json', assetVersion: 2 } ) ), Access \u00b6 Since TriplyDB assets are part of a TriplyDB dataset: - they are accessible under the same access level as the rest of the dataset, and - they are accessible with the same API Token that allows linked data to be published in that dataset. Notice that this makes it easier and safer to deal with source data that is not public. When private data is retrieved from online files or APIs , authorization information must be configured at the HTTP level. This is possible but cumbersome. And, depending on the authentication approach, it is required to create a new API Token and securely configure that in addition to the TriplyDB API Token. Notice that access also is more transparent when TriplyDB assets are used. All and only collaborators that have access to the TriplyDB dataset also have access to the source data. It is clear for all collaborators which source files should be used, and which versions are available. 
This is more transparent than having to share (multiple versions of) source files over email or by other indirect means. TriplyDB instance \u00b6 By default, assets are loaded from the TriplyDB instance that is associated with the currently used API Token. In some situations it is useful to connect to a linked dataset from a different TriplyDB instance. This can be configured with the triplyDb option. The following snippet loads the OWL vocabulary from TriplyDB.com. Notice that the URL of the API must be specified; this is different from the URL of the web-based GUI. loadRdf( Source.TriplyDb.rdf( 'w3c', 'owl', { triplyDb: { url: 'https://triplydb.com' } } ) ), If an asset is part of a non-public dataset, specifying the URL is insufficient. In such cases an API Token from this other TriplyDB instance must be created and configured using the token option in combination with the url option. Compression \u00b6 Source data is often text-based. This means that such source data can often be compressed to minimize storage space and/or Internet bandwidth. TriplyETL provides automatic support for the GNU zip (file name extension *.gz ) compression format. The following snippet uses a TriplyDB assets that was compressed with GNU zip (file extension *.gz ): fromCsv(Source.TriplyDb.asset('my-dataset', { name: 'example.csv.gz' })),","title":"TriplyDB Assets"},{"location":"triply-etl/sources/triplydb-assets/#triplydb-assets","text":"Assets are a core feature of TriplyDB. Assets allow arbitrary files to be stored in the context of a linked dataset. A typical use case for assets is to upload (new versions of) source files. The TriplyETL pipeline can pick the latest versions of these source files and publish the resulting linked data in the the same dataset. The following code snippet uses a JSON source that is stored in a TriplyDB asset: fromJson( Source.TriplyDb.asset( 'some-account', 'some-dataset', { name: 'example.json' } ) ), As with other TriplyDB sources, the account name is optional. When omitted, the user account that is associated with the current API Token is used: loadRdf( Source.TriplyDb.rdf('my-dataset', { name: 'example.json' }) ), As with other source type, multiple assets can be specified: fromCsv([ Source.TriplyDb.asset('my-dataset', { name: 'table1.csv' }), Source.TriplyDb.asset('my-dataset', { name: 'table2.csv' }), ]),","title":"TriplyDB Assets"},{"location":"triply-etl/sources/triplydb-assets/#filtering","text":"If the asset name is omitted, all assets are returned. This is often unpractical, since only some assets must be processed. For example, if a dataset has PDF and JSON assets, only the latter should be processed by the fromJson() source extractor. For such use cases the filter option can be used instead of the name option. The filter option takes a TypeScript function that maps assets names onto Boolean values (true or false). Only the assets for which the function returns truth are included. The following snippet processes all and only assets whose name ends in .json : fromJson( Source.TriplyDb.asset( 'my-dataset', { filter: name => name.endsWith('json') } ) ),","title":"Filtering"},{"location":"triply-etl/sources/triplydb-assets/#versioning","text":"It is possible to upload new versions of an existing TriplyDB asset. When no specific version is specified, a TriplyETL pipeline will use the latest version automatically. In order to use a specific version, the version option can be set to a version number. 
The following snippet uses a specific version of an asset: fromJson( Source.TriplyDb.asset( 'some-account', 'some-dataset', { name: 'example.json', assetVersion: 2 } ) ),","title":"Versioning"},{"location":"triply-etl/sources/triplydb-assets/#access","text":"Since TriplyDB assets are part of a TriplyDB dataset: - they are accessible under the same access level as the rest of the dataset, and - they are accessible with the same API Token that allows linked data to be published in that dataset. Notice that this makes it easier and safer to deal with source data that is not public. When private data is retrieved from online files or APIs , authorization information must be configured at the HTTP level. This is possible but cumbersome. And, depending on the authentication approach, it is required to create a new API Token and securely configure that in addition to the TriplyDB API Token. Notice that access also is more transparent when TriplyDB assets are used. All and only collaborators that have access to the TriplyDB dataset also have access to the source data. It is clear for all collaborators which source files should be used, and which versions are available. This is more transparent than having to share (multiple versions of) source files over email or by other indirect means.","title":"Access"},{"location":"triply-etl/sources/triplydb-assets/#triplydb-instance","text":"By default, assets are loaded from the TriplyDB instance that is associated with the currently used API Token. In some situations it is useful to connect to a linked dataset from a different TriplyDB instance. This can be configured with the triplyDb option. The following snippet loads the OWL vocabulary from TriplyDB.com. Notice that the URL of the API must be specified; this is different from the URL of the web-based GUI. loadRdf( Source.TriplyDb.rdf( 'w3c', 'owl', { triplyDb: { url: 'https://triplydb.com' } } ) ), If an asset is part of a non-public dataset, specifying the URL is insufficient. In such cases an API Token from this other TriplyDB instance must be created and configured using the token option in combination with the url option.","title":"TriplyDB instance"},{"location":"triply-etl/sources/triplydb-assets/#compression","text":"Source data is often text-based. This means that such source data can often be compressed to minimize storage space and/or Internet bandwidth. TriplyETL provides automatic support for the GNU zip (file name extension *.gz ) compression format. The following snippet uses a TriplyDB assets that was compressed with GNU zip (file extension *.gz ): fromCsv(Source.TriplyDb.asset('my-dataset', { name: 'example.csv.gz' })),","title":"Compression"},{"location":"triply-etl/sources/triplydb-datasets/","text":"TriplyDB datasets \u00b6 Datasets in TriplyDB store linked data in one or more graphs. Such datasets can be loaded as a TriplyETL source. The following snippet loads a dataset from TriplyDB into the internal RDF store of TriplyETL: loadRdf(Source.TriplyDb.rdf('my-account', 'my-dataset')), As with other TriplyDB sources, the account name is optional. When omitted, a dataset from the user account that is associated with the current API Token is used: loadRdf(Source.TriplyDb.rdf('my-dataset')), Graphs option \u00b6 By default, all graphs from a linked dataset are loaded. It is possible to specify a only those graphs that should be loaded. 
The following snippet only loads the data model, but not the instance data: loadRdf( Source.TriplyDb.rdf( 'my-account', 'my-dataset', { graphs: ['https://example.com/id/graph/model'] } ) ), TriplyDB instance \u00b6 The triplyDb option can be used to specify that a linked dataset from a different TriplyDB instance should be used. This option works in the same way as for TriplyDB assets: link","title":"TriplyDB Datasets"},{"location":"triply-etl/sources/triplydb-datasets/#triplydb-datasets","text":"Datasets in TriplyDB store linked data in one or more graphs. Such datasets can be loaded as a TriplyETL source. The following snippet loads a dataset from TriplyDB into the internal RDF store of TriplyETL: loadRdf(Source.TriplyDb.rdf('my-account', 'my-dataset')), As with other TriplyDB sources, the account name is optional. When omitted, a dataset from the user account that is associated with the current API Token is used: loadRdf(Source.TriplyDb.rdf('my-dataset')),","title":"TriplyDB datasets"},{"location":"triply-etl/sources/triplydb-datasets/#graphs-option","text":"By default, all graphs from a linked dataset are loaded. It is possible to specify only those graphs that should be loaded. The following snippet only loads the data model, but not the instance data: loadRdf( Source.TriplyDb.rdf( 'my-account', 'my-dataset', { graphs: ['https://example.com/id/graph/model'] } ) ),","title":"Graphs option"},{"location":"triply-etl/sources/triplydb-datasets/#triplydb-instance","text":"The triplyDb option can be used to specify that a linked dataset from a different TriplyDB instance should be used. This option works in the same way as for TriplyDB assets: link","title":"TriplyDB instance"},{"location":"triply-etl/sources/triplydb-queries/","text":"On this page: TriplyDB Queries SPARQL Ask queries SPARQL Construct and Describe queries SPARQL Select queries Versioning API variables Pagination Result graph TriplyDB instance TriplyDB Queries \u00b6 Saved SPARQL queries in TriplyDB can be used as data sources. SPARQL queries are very powerful data sources, since they allow complex filters to be expressed. There are 4 SPARQL query forms, with different source extractors that can process their results: Query form Source extractor SPARQL Ask fromJson() , fromXml() SPARQL Construct loadRdf() SPARQL Describe loadRdf() SPARQL Select fromCsv() , fromJson() , fromTsv() , fromXml() SPARQL Ask queries \u00b6 SPARQL Ask queries can return data in either the JSON or the XML format. This allows them to be processed with the extractors fromJson() and fromXml() . The following code snippet connects to the XML results of a SPARQL Ask query in TriplyDB: fromXml(Source.TriplyDb.query('my-account', 'my-ask-query')), SPARQL Construct and Describe queries \u00b6 SPARQL Construct and Describe queries return data in the RDF format. This allows them to be used with function loadRdf() . The following snippet loads the results of a SPARQL query into the internal RDF store of TriplyETL: loadRdf(Source.TriplyDb.query('my-account', 'my-construct-query')), SPARQL Select queries \u00b6 SPARQL Select queries return data in either the CSV, JSON, TSV, or XML format. This allows them to be used with the following four extractors: fromCsv() , fromJson() , fromTsv() , and fromXml() . The following code snippet connects to the table returned by a SPARQL Select query in TriplyDB: fromCsv(Source.TriplyDb.query('my-account', 'my-select-query')), As with other TriplyDB sources, the account name is optional.
When omitted, the user account that is associated with the current API Token is used: loadRdf(Source.TriplyDb.query('my-construct-query')), Versioning \u00b6 In production systems, applications must be able to choose whether they want to use the latest version of a query (acceptance mode), or whether they want to use a specific recent version (production mode), or whether they want to use a specific older version (legacy mode). Versioning is supported by TriplyDB saved queries. When no specific version is specified, a TriplyETL pipeline will use the latest version of a query automatically. In order to use a specific version, the version option can be set to a version number. The following snippet uses a specific version of a query: fromJson(Source.TriplyDb.query('my-query', { version: 2 })), Not specifying the version option automatically uses the latest version. API variables \u00b6 In production systems, applications often need to request distinct information based on a limited set of input variables. This is supported in TriplyDB saved queries with API variables. API variables ensure that the query string is parameterized correctly, while adhering to the RDF and SPARQL standards. The following example binds the ?country variable inside the query string to literal 'Holland' . This allows the results for Holland to be returned: fromCsv( Source.TriplyDb.query( 'information-about-countries', { variables: { country: 'Holland' } } ) ), Pagination \u00b6 When a bare SPARQL endpoint is queried as an online API , there are sometimes issues with retrieving the full result set for larger queries. With TriplyDB saved queries, the process of obtaining all results is abstracted away from the user, with the TriplyETL source performing multiple requests in the background as needed. Result graph \u00b6 It is often useful to store the results of SPARQL Construct and Describe queries in a specific graph. For example, when internal data is enriched with external sources, it is often useful to store the external enrichments in a separate graph. Another example is the use of a query that applies RDF(S) and/or OWL reasoning. In such cases the results of the reasoner may be stored in a specific graph. The following snippet stores the results of the specified construct query in a special enrichment graph: loadRdf( Source.TriplyDb.query('my-query', { toGraph: graph.enrichment }) ) This snippet assumes that the graph names have been declared (see Declarations ). TriplyDB instance \u00b6 The triplyDb option can be used to specify that a query from a different TriplyDB instance should be used. This option works in the same way as for TriplyDB assets: link","title":"TriplyDB Queries"},{"location":"triply-etl/sources/triplydb-queries/#triplydb-queries","text":"Saved SPARQL queries in TriplyDB can be used as data sources. SPARQL queries are very powerful data sources, since they allow complex filters to be expressed. There are 4 SPARQL query forms, with different source extractors that can process their results: Query form Source extractor SPARQL Ask fromJson() , fromXml() SPARQL Construct loadRdf() SPARQL Describe loadRdf() SPARQL Select fromCsv() , fromJson() , fromTsv() , fromXml()","title":"TriplyDB Queries"},{"location":"triply-etl/sources/triplydb-queries/#sparql-ask-queries","text":"SPARQL Ask queries can return data in either the JSON or the XML format. This allows them to be processed with the extractors fromJson() and fromXml() .
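For the JSON result format, a minimal sketch could look like this (the account and query names are placeholders):
fromJson(Source.TriplyDb.query('my-account', 'my-ask-query')),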
The following code snippet connects to the XML results of a SPARQL Ask query in TriplyDB: fromXml(Source.TriplyDb.query('my-account', 'my-ask-query')),","title":"SPARQL Ask queries"},{"location":"triply-etl/sources/triplydb-queries/#sparql-construct-and-describe-queries","text":"SPARQL Construct and Describe queries return data in the RDF format. This allows them to be used with function loadRdf() . The following snippet loads the results of a SPARQL query into the internal RDF store of TriplyETL: loadRdf(Source.TriplyDb.query('my-account', 'my-construct-query')),","title":"SPARQL Construct and Describe queries"},{"location":"triply-etl/sources/triplydb-queries/#sparql-select-queries","text":"SPARQL Select queries return data in either the CSV, JSON, TSV, or XML format. This allows them to be used with the following four extractors: fromCsv() , fromJson() , fromTsv() , and fromXml() . The following code snippet connects to the table returned by a SPARQL Select query in TriplyDB: fromCsv(Source.TriplyDb.query('my-account', 'my-select-query')), As with other TriplyDB sources, the account name is optional. When omitted, the user account that is associated with the current API Token is used: loadRdf(Source.TriplyDb.query('my-construct-query')),","title":"SPARQL Select queries"},{"location":"triply-etl/sources/triplydb-queries/#versioning","text":"In production systems, applications must be able to choose whether they want to use the latest version of a query (acceptance mode), or whether they want to use a specific recent version (production mode), or whether they want to use a specific older version (legacy mode). Versioning is supported by TriplyDB saved queries. When no specific version is specified, a TriplyETL pipeline will use the latest version of a query automatically. In order to use a specific version, the version option can be set to a version number. The following snippet uses a specific version of a query: fromJson(Source.TriplyDb.query('my-query', { version: 2 })), Not specifying the version option automatically uses the latest version.","title":"Versioning"},{"location":"triply-etl/sources/triplydb-queries/#api-variables","text":"In production systems, applications often need to request distinct information based on a limited set of input variables. This is supported in TriplyDB saved queries which API variables. API variables ensure that the query string is parameterized correctly, while adhering to the RDF and SPARQL standards. The following example binds the ?country variable inside the query string to literal 'Holland' . This allows the results for Holland to be returned: fromCsv( Source.TriplyDb.query( 'information-about-countries', { variables: { country: 'Holland' } } ) ),","title":"API variables"},{"location":"triply-etl/sources/triplydb-queries/#pagination","text":"When a bare SPARQL endpoint is queried as an online API , there are sometimes issues with retrieving the full result set for larger queries. With TriplyDB saved queries, the process of obtaining all results is abstracted away from the user, with the TriplyETL source performing multiple requests in the background as needed.","title":"Pagination"},{"location":"triply-etl/sources/triplydb-queries/#result-graph","text":"It is often useful to store the results of SPARQL Construct and Describe queries in a specific graph. For example, when internal data is enriched with external sources, it is often useful to store the external enrichments in a separate graph. 
Another example is the use of a query that applies RDF(S) and/or OWL reasoning. In such cases the results of the reasoner may be stored in a specific graph. The following snippet stores the results of the specified construct query in a special enrichment graph: loadRdf( Source.TriplyDb.query('my-query', { toGraph: graph.enrichment }) ) This snippet assumes that the graph names have been declared (see Delcarations ).","title":"Result graph"},{"location":"triply-etl/sources/triplydb-queries/#triplydb-instance","text":"The triplyDb option can be used to specify that a query from a different TriplyDB instance should be used. This option works in the same way as for TriplyDB assets: link","title":"TriplyDB instance"},{"location":"triply-etl/tmp/automation/","text":"On this page: Special key: $environment TriplyETL runs within a Gitlab CI environment ( Figure 1 ). Figure 1: The landing page of a TriplyETL project in Gitlab. Special key: $environment \u00b6 The special key $environment denotes the DTAP environment in which the TriplyETL pipeline is running. This allows special actions to be performed based on whether the pipeline runs in \"Debug\" , \"Test\" , \"Acceptance\" , or \"Production\" mode. See the DTAP documentation for more information.","title":"Automation"},{"location":"triply-etl/tmp/automation/#special-key-environment","text":"The special key $environment denotes the DTAP environment in which the TriplyETL pipeline is running. This allows special actions to be performed based on whether the pipeline runs in \"Debug\" , \"Test\" , \"Acceptance\" , or \"Production\" mode. See the DTAP documentation for more information.","title":"Special key: $environment"},{"location":"triply-etl/tmp/ci-cd/","text":"On this page: How to create a TriplyETL CI pipeline? Modifying a pipeline artifacts: variables: script: only: This document explains how to maintain an ETL that runs in the gitlab CI. How to create a TriplyETL CI pipeline? \u00b6 Use the TriplyETL boilerplate from this repository , specifically the etl folder. Each customer organization in gitlab needs their own 'CI runner'. If you're adding a TriplyETL repo to an existing customer, then you're probably fine. But if this is a newly created organization, you will need to register a runner for this organization. To check whether the customer organization has a runner configured, go to the customer organization Settings in gitlab, then go to CI/CD . The URL should look something like this: https://git.triply.cc/groups/customers//-/settings/ci_cd . Click Runners and verify that Available runners: is not zero. If you need to register a runner, contact a sysadmin (see here for the list of sysadmins) and ask them to create a runner for your organization. Modifying a pipeline \u00b6 To change what happens in a CI pipeline, all you need to do is modify the .gitlab-ci.yml file of your repository. Below we detail some relevant .gitlab-ci.yml fields that are used in most of our ETL pipelines (see here for the complete gitlab documentation on what all these fields mean) artifacts: \u00b6 Artifacts are files or directories that gitlab will save for you. These files are available on the gitlab pipelines page after a job ran. This is particularly useful for TriplyETL error/log files, as you download these from the gitlab UI. variables: \u00b6 You can define environment variables in several places. In the .gitlab.yml file, you can configure them at a job level or for all jobs. You can also configure them in the gitlab UI in the pipeline schedule form. 
Variables defined in a pipeline schedule will overwrite variables defined in the .gitlab-ci.yml file (see here for the gitlab documentation on variable precedence). script: \u00b6 This is the code that will run in the job. If you need a job to run two TriplyETL commands after each other, you can easily add another npx etl .... line here. only: \u00b6 This defines when a job should run. If a job does not have an only: configure, it will always run. See here for documentation about the syntax of only ). The boilerplate comes with some example only: rules that look like this: only: variables: - $JOB == \"$CI_JOB_NAME\" This means that we only run this job if there a JOB environment variable that is the same as CI_JOB_NAME . Notice that the CI_JOB_NAME is a default environment variable that gitlab gives us and that equals the name of the job, e.g. production (see other predefined variables here ). If you want to run this specific job, JOB is the environment variable that you should set in the pipeline schedule page. In other words, if you set JOB=production in the pipeline schedule page, then using the above only: rule, only the intended job will run.","title":"Ci cd"},{"location":"triply-etl/tmp/ci-cd/#how-to-create-a-triplyetl-ci-pipeline","text":"Use the TriplyETL boilerplate from this repository , specifically the etl folder. Each customer organization in gitlab needs their own 'CI runner'. If you're adding a TriplyETL repo to an existing customer, then you're probably fine. But if this is a newly created organization, you will need to register a runner for this organization. To check whether the customer organization has a runner configured, go to the customer organization Settings in gitlab, then go to CI/CD . The URL should look something like this: https://git.triply.cc/groups/customers//-/settings/ci_cd . Click Runners and verify that Available runners: is not zero. If you need to register a runner, contact a sysadmin (see here for the list of sysadmins) and ask them to create a runner for your organization.","title":"How to create a TriplyETL CI pipeline?"},{"location":"triply-etl/tmp/ci-cd/#modifying-a-pipeline","text":"To change what happens in a CI pipeline, all you need to do is modify the .gitlab-ci.yml file of your repository. Below we detail some relevant .gitlab-ci.yml fields that are used in most of our ETL pipelines (see here for the complete gitlab documentation on what all these fields mean)","title":"Modifying a pipeline"},{"location":"triply-etl/tmp/ci-cd/#artifacts","text":"Artifacts are files or directories that gitlab will save for you. These files are available on the gitlab pipelines page after a job ran. This is particularly useful for TriplyETL error/log files, as you download these from the gitlab UI.","title":"artifacts:"},{"location":"triply-etl/tmp/ci-cd/#variables","text":"You can define environment variables in several places. In the .gitlab.yml file, you can configure them at a job level or for all jobs. You can also configure them in the gitlab UI in the pipeline schedule form. Variables defined in a pipeline schedule will overwrite variables defined in the .gitlab-ci.yml file (see here for the gitlab documentation on variable precedence).","title":"variables:"},{"location":"triply-etl/tmp/ci-cd/#script","text":"This is the code that will run in the job. If you need a job to run two TriplyETL commands after each other, you can easily add another npx etl .... 
line here.","title":"script:"},{"location":"triply-etl/tmp/ci-cd/#only","text":"This defines when a job should run. If a job does not have an only: configure, it will always run. See here for documentation about the syntax of only ). The boilerplate comes with some example only: rules that look like this: only: variables: - $JOB == \"$CI_JOB_NAME\" This means that we only run this job if there a JOB environment variable that is the same as CI_JOB_NAME . Notice that the CI_JOB_NAME is a default environment variable that gitlab gives us and that equals the name of the job, e.g. production (see other predefined variables here ). If you want to run this specific job, JOB is the environment variable that you should set in the pipeline schedule page. In other words, if you set JOB=production in the pipeline schedule page, then using the above only: rule, only the intended job will run.","title":"only:"},{"location":"triply-etl/tmp/context/","text":"On this page: Context Configuring the Context Configuring the standard graph Configuring the well-known IRI prefix Context \u00b6 Configuring the Context \u00b6 The TriplyETL Context is specified when the Etl object is instantiated. This often appears towards the start of a pipeline script. The TriplyETL Context allows the following things to be specified: The data sources that can be used in the ETL. The data destinations where linked data is published to. The named graph in which triple calls with no graph argument add their data. The prefix IRI for blank node-replacing well-known IRIs. Configuring the standard graph \u00b6 When we call triple with 3 arguments, a triple is created and placed in a named graph that is chosen by TriplyETL. You can change the name of this default graph by specifying it in the TriplyETL context. Notice that graph names must be IRIs: const etl = new Etl() Configuring the well-known IRI prefix \u00b6 TriplyDB performs Skolemization, an approach in which blank nodes are systematically replaced by well-known IRIs. TriplyDB chooses a well-known IRI prefix for you, const etl = new Etl({ wellKnownIriPrefix: 'https://triplydb.com/Triply/example/.well-known/genid/', })","title":"Context"},{"location":"triply-etl/tmp/context/#context","text":"","title":"Context"},{"location":"triply-etl/tmp/context/#configuring-the-context","text":"The TriplyETL Context is specified when the Etl object is instantiated. This often appears towards the start of a pipeline script. The TriplyETL Context allows the following things to be specified: The data sources that can be used in the ETL. The data destinations where linked data is published to. The named graph in which triple calls with no graph argument add their data. The prefix IRI for blank node-replacing well-known IRIs.","title":"Configuring the Context"},{"location":"triply-etl/tmp/context/#configuring-the-standard-graph","text":"When we call triple with 3 arguments, a triple is created and placed in a named graph that is chosen by TriplyETL. You can change the name of this default graph by specifying it in the TriplyETL context. Notice that graph names must be IRIs: const etl = new Etl()","title":"Configuring the standard graph"},{"location":"triply-etl/tmp/context/#configuring-the-well-known-iri-prefix","text":"TriplyDB performs Skolemization, an approach in which blank nodes are systematically replaced by well-known IRIs. 
TriplyDB chooses a well-known IRI prefix for you, const etl = new Etl({ wellKnownIriPrefix: 'https://triplydb.com/Triply/example/.well-known/genid/', })","title":"Configuring the well-known IRI prefix"},{"location":"triply-etl/tmp/copy/","text":"On this page: Copy an existing entry over to a new entry Function signature Copy an existing entry over to a new entry \u00b6 Copying is the act of creating a new thing that is based on a specific existing thing. Function signature \u00b6 The copy function has the following signature: etl.use( copy({ fromKey: 'FROM_KEY', toKey: 'TO_KEY', type: 'VALUE_TYPE', change: value => FUNCTION_BODY}), ) This function copies the value from \u2018foo\u2019 to \u2018bar\u2019. The type key ensures that the value in \u2018foo\u2019 is cast to the specified type prior to being copied. The optional change key allows the cast value to be transformed prior to storing it in \u2018bar\u2019. Leaving the change key out results in a direct copy in which the value is not modified. This function emits an error if fromKey and toKey are the same. If you want to change a value in-place you should use change instead. This function emits an error if toKey already exists. If you want to replace the value in an existing entry then you should use replace instead. The change function only takes the value argument and does not take the context argument. If you need the context argument then they must use add instead.","title":"Copy"},{"location":"triply-etl/tmp/copy/#copy-an-existing-entry-over-to-a-new-entry","text":"Copying is the act of creating a new thing that is based on a specific existing thing.","title":"Copy an existing entry over to a new entry"},{"location":"triply-etl/tmp/copy/#function-signature","text":"The copy function has the following signature: etl.use( copy({ fromKey: 'FROM_KEY', toKey: 'TO_KEY', type: 'VALUE_TYPE', change: value => FUNCTION_BODY}), ) This function copies the value from \u2018foo\u2019 to \u2018bar\u2019. The type key ensures that the value in \u2018foo\u2019 is cast to the specified type prior to being copied. The optional change key allows the cast value to be transformed prior to storing it in \u2018bar\u2019. Leaving the change key out results in a direct copy in which the value is not modified. This function emits an error if fromKey and toKey are the same. If you want to change a value in-place you should use change instead. This function emits an error if toKey already exists. If you want to replace the value in an existing entry then you should use replace instead. The change function only takes the value argument and does not take the context argument. If you need the context argument then they must use add instead.","title":"Function signature"},{"location":"triply-etl/tmp/faq/","text":"On this page: FAQ Why does my pipeline schedule only run an install job? I made a change to the .gitlab-ci.yml file and after I push I see a pipeline failed with status yaml invalid. How can I fix this? Why is my pipeline not running and marked as 'pending'? What do all these $CI_... environment variables mean? What should I do when the pipeline fail when I commit in a personal project? FAQ \u00b6 Why does my pipeline schedule only run an install job? \u00b6 This probably means that none of the only: rules in your .gitlab-ci.yml file match. You should check whether the variables you've set in the pipelines schedules page match with the only: rules in your .gitlab-ci.yml file. 
I made a change to the .gitlab-ci.yml file and after I push I see a pipeline failed with status yaml invalid . How can I fix this? \u00b6 This error is emitted when the content of your .gitlab-ci.yml file does not follow the Yaml syntax. In GitLab, search for a page called CI Lint. On that page, copy-paste the contents of your .gitlab-ci.yml file into the text field and click validate. GitLab will indicate to you the location in your file where the Yaml syntax is violated, so that you can fix this. Why is my pipeline not running and marked as 'pending'? \u00b6 This probably means that you have not configured an ETL runner for this customer organization yet. See the section about getting started here What do all these $CI_... environment variables mean? \u00b6 These are environment variables added by gitlab. To see what they mean, go to this gitlab documentation page, also mentioned above. What should I do when the pipeline fails when I commit in a personal project? \u00b6 In a personal repository, you do not have dedicated runners available, only shared ones. Thus, your pipelines will fail. This is expected and it is not an issue. You can either ignore the failed pipeline or remove gitlab-ci.yml from the repository.","title":"Faq"},{"location":"triply-etl/tmp/faq/#faq","text":"","title":"FAQ"},{"location":"triply-etl/tmp/faq/#why-does-my-pipeline-schedule-only-run-an-install-job","text":"This probably means that none of the only: rules in your .gitlab-ci.yml file match. You should check whether the variables you've set in the pipelines schedules page match with the only: rules in your .gitlab-ci.yml file.","title":"Why does my pipeline schedule only run an install job?"},{"location":"triply-etl/tmp/faq/#i-made-a-change-to-the-gitlab-ciyml-file-and-after-i-push-i-see-a-pipeline-failed-with-status-yaml-invalid-how-can-i-fix-this","text":"This error is emitted when the content of your .gitlab-ci.yml file does not follow the Yaml syntax. In GitLab, search for a page called CI Lint. On that page, copy-paste the contents of your .gitlab-ci.yml file into the text field and click validate. GitLab will indicate to you the location in your file where the Yaml syntax is violated, so that you can fix this.","title":"I made a change to the .gitlab-ci.yml file and after I push I see a pipeline failed with status yaml invalid. How can I fix this?"},{"location":"triply-etl/tmp/faq/#why-is-my-pipeline-not-running-and-marked-as-pending","text":"This probably means that you have not configured an ETL runner for this customer organization yet. See the section about getting started here","title":"Why is my pipeline not running and marked as 'pending'?"},{"location":"triply-etl/tmp/faq/#what-do-all-these-ci_-environment-variables-mean","text":"These are environment variables added by gitlab. To see what they mean, go to this gitlab documentation page, also mentioned above.","title":"What do all these $CI_... environment variables mean?"},{"location":"triply-etl/tmp/faq/#what-should-i-do-when-the-pipeline-fail-when-i-commit-in-a-personal-project","text":"In a personal repository, you do not have dedicated runners available, only shared ones. Thus, your pipelines will fail. This is expected and it is not an issue.
You can either ignore the failed pipeline or remove gitlab-ci.yml from the repository.","title":"What should I do when the pipeline fail when I commit in a personal project?"},{"location":"triply-etl/tmp/getting-started/","text":"On this page: Getting started Transforming RDF data Connect a data source Important terms before starting to work with TriplyETL Middlewares What is a record? What is the store? What is the context(ctx)? A JSON data source An XML data source Getting started \u00b6 Transforming RDF data \u00b6 If you have RDF data that does not need to be transformed, see copying source data . If you have RDF data that does need to be transformed, you can use the following pattern. This example renames the graph. const etl = new Etl({ defaultGraph: graph.model }) etl.use( loadRdf(Source.file(`data/shapes.trig`)), mapQuads( (quad, ctx) => ctx.store.quad( quad.subject, quad.predicate, quad.object, iri(prefix.graph, 'new-graph') ) ), toRdf(Destination.TriplyDb.rdf('my-dataset', remoteOptions)) ) Similarly, you can change all the subject, predicates or objects in your data. Also, you can choose to transform triples of a specific subject, predicate, object or graph name. in this case, you should use: mapQuads( (quad, ctx) => ctx.store.quad( quad.subject, app.prefix.example('new-predicate'), quad.object, quad.graph ), {predicate: app.prefix.example(\"old-predicate\")} ) Connect a data source \u00b6 This section extends the pipeline from the previous section by connecting a data source. TriplyETL can connect to database systems and web APIs, but to keep things simple we will use the following tabular input data from a local file: ID NAME 00001 Anna 00002 Bob 00003 Carol We then perform the following steps to build a pipelines that processes this data source: 1. Create a text file called example.csv in a text editor, and copy/paste the following source data into that file: ID,NAME 00001,Anna 00002,Bob 00003,Carol 2. Open text file main.ts and add the following content: import { Etl, declarePrefix, fromCsv, iri, literal, rdfs, Source, toRdf, triple } from '@triplyetl/etl/generic' import { rdfs } from '@triplyetl/vocabularies' export default async function (): Promise { const etl = new Etl({ prefixes: { ex: declarePrefix('https://example.com/'), }, }) etl.use( // Connects the tabular source data to the pipeline. // Every row in the table is processed as a TriplyETL record. fromCsv(Source.file('example.csv')), // Create a linked data statement that is based on the // source data. triple(iri(etl.prefix.ex, 'ID'), rdfs.label, 'NAME'), toRdf(Destination.file('example.ttl')) ) return etl } 3. Transpile the code with: npm run build 4. Run the ETL with: npx etl The TriplyETL script will give you a link to the uploaded dataset. This dataset contains the following graph content: Important terms before starting to work with TriplyETL \u00b6 Middlewares \u00b6 The most common occurrence in ETL are the middlewares. Middlewares are essentially reusable pieces of code that execute a certain long and/or complex piece of functionality. An middleware is a piece of code that transforms a record and can be invoked with etl.use(). Example of middleware function: loadRdf(Source.TriplyDb.query('my-account', 'my-query')), What is a record? \u00b6 TriplyETL doesn't have infinite memory and not all data can be loaded at once. So instead of loading data all at once, first one part of data is processed and written to the file, and then the second one, third one, and so on. These parts are called records. 
Each record goes through all middlewares before a new record is started. What is the store? \u00b6 As mentioned above, when ETL is running we go through data record by record. Together with the input data we also have output data. Before being written to the final destination (triplyDB or file), output data has to be kept somewhere and that's what store is for. The store is for temporarily storing linked data. Every record has its own store. toRdf reads from the store. etl.use( toRdf(Ratt.Destination.file('example.ttl')) ) What is the context(ctx)? \u00b6 In TriplyETL, the context is an object that represents the current record. The context gives us access to the triple store, the in memory storage of our triples. It also contains utility functions that will be used to modify and transform our source data into linked data. Some examples of ctx: ctx.getString(\"address\") ctx.getIri(...) ctx.getArray(...) ctx.store.addQuad(...) ctx.store.getQuad(...) //etc. A JSON data source \u00b6 The following code snippet uses extractor fromJson() with two inline example records: import { fromJson, logRecord, Etl } from '@triplydb/etl/generic' export default async function (): Promise { const etl = new Etl() etl.use( fromJson([ { id: '123', name: 'John' }, { id: '456', name: 'Jane' }, ]), logRecord(), ) return etl } Debug function logRecord() prints the current record to standard output. When this pipeline is run, the two records are printed as follows: { \"id\": \"123\", \"name\": \"John\", \"$recordId\": 1, \"$environment\": \"Development\" } { \"id\": \"456\", \"name\": \"Jane\", \"$recordId\": 2, \"$environment\": \"Development\" } Notice that TriplyETL adds two keys to both records: $recordId and $environment (see Special Key ). An XML data source \u00b6 Now suppose that we change the source system. We no longer use in-line JSON, but will instead use an XML file. The contents of the XML file are as follows: 123 John 456 Jane Let us change the TriplyETL script to use the XML source connector: import { Etl, fromXml, logRecord, Source } from '@triplyetl/etl/generic' export default async function (): Promise { const etl = new Etl() etl.use( fromXml(Source.file('example.xml')), logRecord(), ) return etl } This new script logs the following two records: { \"id\": \"123\", \"name\": \"John\", \"$recordId\": 1, \"$environment\": \"Development\" } { \"id\": \"456\", \"name\": \"Jane\", \"$recordId\": 2, \"$environment\": \"Development\" } Notice that the two records that are logged from an XML source are completely identical to the two records that were previously logged from a JSON source. This is an essential property of TriplyETL: it treats data from any source system in the same way, using the same intermediary record format. This makes it easy to write pipelines that process data from a large number of different data sources. This also makes replacing a data source in one format with a data source in another format a relatively cheap operation. More often than not, only the source connector needs to be changed, and all transformations and assertions remain as they were.","title":"Getting started"},{"location":"triply-etl/tmp/getting-started/#getting-started","text":"","title":"Getting started"},{"location":"triply-etl/tmp/getting-started/#transforming-rdf-data","text":"If you have RDF data that does not need to be transformed, see copying source data . If you have RDF data that does need to be transformed, you can use the following pattern. This example renames the graph. 
const etl = new Etl({ defaultGraph: graph.model }) etl.use( loadRdf(Source.file(`data/shapes.trig`)), mapQuads( (quad, ctx) => ctx.store.quad( quad.subject, quad.predicate, quad.object, iri(prefix.graph, 'new-graph') ) ), toRdf(Destination.TriplyDb.rdf('my-dataset', remoteOptions)) ) Similarly, you can change all the subject, predicates or objects in your data. Also, you can choose to transform triples of a specific subject, predicate, object or graph name. in this case, you should use: mapQuads( (quad, ctx) => ctx.store.quad( quad.subject, app.prefix.example('new-predicate'), quad.object, quad.graph ), {predicate: app.prefix.example(\"old-predicate\")} )","title":"Transforming RDF data"},{"location":"triply-etl/tmp/getting-started/#connect-a-data-source","text":"This section extends the pipeline from the previous section by connecting a data source. TriplyETL can connect to database systems and web APIs, but to keep things simple we will use the following tabular input data from a local file: ID NAME 00001 Anna 00002 Bob 00003 Carol We then perform the following steps to build a pipelines that processes this data source: 1. Create a text file called example.csv in a text editor, and copy/paste the following source data into that file: ID,NAME 00001,Anna 00002,Bob 00003,Carol 2. Open text file main.ts and add the following content: import { Etl, declarePrefix, fromCsv, iri, literal, rdfs, Source, toRdf, triple } from '@triplyetl/etl/generic' import { rdfs } from '@triplyetl/vocabularies' export default async function (): Promise { const etl = new Etl({ prefixes: { ex: declarePrefix('https://example.com/'), }, }) etl.use( // Connects the tabular source data to the pipeline. // Every row in the table is processed as a TriplyETL record. fromCsv(Source.file('example.csv')), // Create a linked data statement that is based on the // source data. triple(iri(etl.prefix.ex, 'ID'), rdfs.label, 'NAME'), toRdf(Destination.file('example.ttl')) ) return etl } 3. Transpile the code with: npm run build 4. Run the ETL with: npx etl The TriplyETL script will give you a link to the uploaded dataset. This dataset contains the following graph content:","title":"Connect a data source"},{"location":"triply-etl/tmp/getting-started/#important-terms-before-starting-to-work-with-triplyetl","text":"","title":"Important terms before starting to work with TriplyETL"},{"location":"triply-etl/tmp/getting-started/#middlewares","text":"The most common occurrence in ETL are the middlewares. Middlewares are essentially reusable pieces of code that execute a certain long and/or complex piece of functionality. An middleware is a piece of code that transforms a record and can be invoked with etl.use(). Example of middleware function: loadRdf(Source.TriplyDb.query('my-account', 'my-query')),","title":"Middlewares"},{"location":"triply-etl/tmp/getting-started/#what-is-a-record","text":"TriplyETL doesn't have infinite memory and not all data can be loaded at once. So instead of loading data all at once, first one part of data is processed and written to the file, and then the second one, third one, and so on. These parts are called records. Each record goes through all middlewares before a new record is started.","title":"What is a record?"},{"location":"triply-etl/tmp/getting-started/#what-is-the-store","text":"As mentioned above, when ETL is running we go through data record by record. Together with the input data we also have output data. 
Before being written to the final destination (triplyDB or file), output data has to be kept somewhere and that's what store is for. The store is for temporarily storing linked data. Every record has its own store. toRdf reads from the store. etl.use( toRdf(Ratt.Destination.file('example.ttl')) )","title":"What is the store?"},{"location":"triply-etl/tmp/getting-started/#what-is-the-contextctx","text":"In TriplyETL, the context is an object that represents the current record. The context gives us access to the triple store, the in memory storage of our triples. It also contains utility functions that will be used to modify and transform our source data into linked data. Some examples of ctx: ctx.getString(\"address\") ctx.getIri(...) ctx.getArray(...) ctx.store.addQuad(...) ctx.store.getQuad(...) //etc.","title":"What is the context(ctx)?"},{"location":"triply-etl/tmp/getting-started/#a-json-data-source","text":"The following code snippet uses extractor fromJson() with two inline example records: import { fromJson, logRecord, Etl } from '@triplydb/etl/generic' export default async function (): Promise { const etl = new Etl() etl.use( fromJson([ { id: '123', name: 'John' }, { id: '456', name: 'Jane' }, ]), logRecord(), ) return etl } Debug function logRecord() prints the current record to standard output. When this pipeline is run, the two records are printed as follows: { \"id\": \"123\", \"name\": \"John\", \"$recordId\": 1, \"$environment\": \"Development\" } { \"id\": \"456\", \"name\": \"Jane\", \"$recordId\": 2, \"$environment\": \"Development\" } Notice that TriplyETL adds two keys to both records: $recordId and $environment (see Special Key ).","title":"A JSON data source"},{"location":"triply-etl/tmp/getting-started/#an-xml-data-source","text":"Now suppose that we change the source system. We no longer use in-line JSON, but will instead use an XML file. The contents of the XML file are as follows: 123 John 456 Jane Let us change the TriplyETL script to use the XML source connector: import { Etl, fromXml, logRecord, Source } from '@triplyetl/etl/generic' export default async function (): Promise { const etl = new Etl() etl.use( fromXml(Source.file('example.xml')), logRecord(), ) return etl } This new script logs the following two records: { \"id\": \"123\", \"name\": \"John\", \"$recordId\": 1, \"$environment\": \"Development\" } { \"id\": \"456\", \"name\": \"Jane\", \"$recordId\": 2, \"$environment\": \"Development\" } Notice that the two records that are logged from an XML source are completely identical to the two records that were previously logged from a JSON source. This is an essential property of TriplyETL: it treats data from any source system in the same way, using the same intermediary record format. This makes it easy to write pipelines that process data from a large number of different data sources. This also makes replacing a data source in one format with a data source in another format a relatively cheap operation. More often than not, only the source connector needs to be changed, and all transformations and assertions remain as they were.","title":"An XML data source"},{"location":"triply-etl/tmp/main-loop/","text":"On this page: The main loop The main loop \u00b6 The following code snippet shows the main TriplyETL loop. Every TriplyETL pipeline consists of such a loop. 
import { Etl } from '@triplyetl/etl/generic' export default async function (): Promise { const etl = new Etl() etl.use( // Etc ) return etl } By adding the following six components, you configure the pipeline to create a linked data knowledge graph for your organization: Declarations declare IRI prefixes, graph names, and vocabularies that are used in the pipeline configuration. Source Connectors connect to the systems that add source data to your knowledge graph. Transformations clean, modify, and enrich the source data. Assertions generate the linked data that goes into the knowledge graph. Validation ensures that the linked data that is added to the knowledge graph follows your data model. Publication makes the linked data knowledge graph available in a triple store. These six components occur in specific places inside the TriplyETL main loop, as indicated by the comments in the following code snippet: import { Etl } from '@triplyetl/etl/generic' // 1. Declarations are made before the main loop. export default async function (): Promise { const etl = new Etl() etl.use( // 2. Source Connectors appear at the top. // 3. Transformations appear in the middle. // 4. Assertions appear in the middle. // 5. Validation occurs directly before publication. // 6. Publication appears at the bottom. ) return etl }","title":"Main loop"},{"location":"triply-etl/tmp/main-loop/#the-main-loop","text":"The following code snippet shows the main TriplyETL loop. Every TriplyETL pipeline consists of such a loop. import { Etl } from '@triplyetl/etl/generic' export default async function (): Promise { const etl = new Etl() etl.use( // Etc ) return etl } By adding the following six components, you configure the pipeline to create a linked data knowledge graph for your organization: Declarations declare IRI prefixes, graph names, and vocabularies that are used in the pipeline configuration. Source Connectors connect to the systems that add source data to your knowledge graph. Transformations clean, modify, and enrich the source data. Assertions generate the linked data that goes into the knowledge graph. Validation ensures that the linked data that is added to the knowledge graph follows your data model. Publication makes the linked data knowledge graph available in a triple store. These six components occur in specific places inside the TriplyETL main loop, as indicated by the comments in the following code snippet: import { Etl } from '@triplyetl/etl/generic' // 1. Declarations are made before the main loop. export default async function (): Promise { const etl = new Etl() etl.use( // 2. Source Connectors appear at the top. // 3. Transformations appear in the middle. // 4. Assertions appear in the middle. // 5. Validation occurs directly before publication. // 6. Publication appears at the bottom. 
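// To make the six steps above more tangible, the following commented-out lines sketch what a
// filled-in loop could look like. This is a hedged illustration only: the exact middleware calls
// depend on your source system and data model, and validate() in particular is an assumed helper
// name rather than one documented on this page; fromCsv, custom.change, triple, and toRdf are
// used as shown elsewhere in this documentation.
//   fromCsv(Source.file('example.csv')),                                            // 2. Source Connector
//   custom.change({ key: 'NAME', type: 'string', change: value => value.trim() }),  // 3. Transformation
//   triple(iri(etl.prefix.ex, 'ID'), rdfs.label, 'NAME'),                           // 4. Assertion
//   validate(Source.file('model.trig')),                                            // 5. Validation (assumed helper)
//   toRdf(Destination.file('example.ttl')),                                         // 6. Publication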
) return etl }","title":"The main loop"},{"location":"triply-etl/tmp/source-destination/","text":"On this page: Source destination An easier way to configure graph names and prefixes Source destination \u00b6 An easier way to configure graph names and prefixes \u00b6 Instead of setting the graph name and the prefixes for every ETL, you can use functions for their generation: export function create_prefixes( organization: string = default_organization, dataset: string, host: string = default_host ) { const prefix_base = Ratt.prefixer(`https://${host}/${organization}/${dataset}/`) const prefix_bnode = Ratt.prefixer(prefix_base(`.well-known/genid/`)) const prefix_graph = Ratt.prefixer(prefix_base(`graph/`)) return { bnode: prefix_bnode, graph: prefix_graph, } } For example, if host==='triplydb.com' , organization==='exampleOrganization' and dataset==='pokemon' , then the prefix for the blank nodes will be https://triplydb.com/exampleOrganization/pokemon/.well-known/genid/ . Then, similarly, you can use another function for the graph names: export function create_graphs( dataset: string, organization: string = default_organization, host: string = default_host ) { const prefix = create_prefixes(organization, dataset, host) return { default: prefix.graph('default'), metadata: prefix.graph('metadata'), instances: prefix.graph('instances'), instances_report: prefix.graph('instances/report'), shapes: prefix.graph('shapes'), } }","title":"Source destination"},{"location":"triply-etl/tmp/source-destination/#source-destination","text":"","title":"Source destination"},{"location":"triply-etl/tmp/source-destination/#an-easier-way-to-configure-graph-names-and-prefixes","text":"Instead of setting the graph name and the prefixes for every ETL, you can use functions for their generation: export function create_prefixes( organization: string = default_organization, dataset: string, host: string = default_host ) { const prefix_base = Ratt.prefixer(`https://${host}/${organization}/${dataset}/`) const prefix_bnode = Ratt.prefixer(prefix_base(`.well-known/genid/`)) const prefix_graph = Ratt.prefixer(prefix_base(`graph/`)) return { bnode: prefix_bnode, graph: prefix_graph, } } For example, if host==='triplydb.com' , organization==='exampleOrganization' and dataset==='pokemon' , then the prefix for the blank nodes will be https://triplydb.com/exampleOrganization/pokemon/.well-known/genid/ . Then, similarly, you can use another function for the graph names: export function create_graphs( dataset: string, organization: string = default_organization, host: string = default_host ) { const prefix = create_prefixes(organization, dataset, host) return { default: prefix.graph('default'), metadata: prefix.graph('metadata'), instances: prefix.graph('instances'), instances_report: prefix.graph('instances/report'), shapes: prefix.graph('shapes'), } }","title":"An easier way to configure graph names and prefixes"},{"location":"triply-etl/tmp/static-dynamic-statements/","text":"On this page: Static and dynamic statements Create dynamic statements Static and dynamic triples When should you use an IRI instead of an URI literal? Limitation of literal() and iri() Static and dynamic statements \u00b6 Create dynamic statements \u00b6 Dynamic statements are statements that are based on some aspect of the source data. 
We use the following Record as an example: Country Inhabitants France null Germany 83190556 Netherlands 17650200 We start with creating the prefix and term declarations (see the Declare documentation for more information): const base = declarePrefix('https://triplydb.com/Triply/example/') const prefix = { def: declarePrefix(base('def/')), id: declarePrefix(base('id/')), xsd: declarePrefix('http://www.w3.org/2001/XMLSchema#'), } const def = { Country: prefix.def('Country'), name: prefix.def('inhabitants'), } const xsd = { positiveInteger: prefix.xsd('positiveInteger'), string: prefix.xsd('string'), } const input_string = ['Country', 'inhabitants'] With these prefix and term constants in place, a dynamic statement is created as follows: triple( iri(prefix.id, 'Country'), def.inhabitants, literal('Inhabitants', xsd.positiveInteger) ), Notice the following details: - iri() is used to create a dynamic IRI term. - Arguments Country and Inhabitants allow values for these keys to be used from processed Records. - The IRI prefix for the subject term is specified with constant prefix.id . - literal is used to create a dynamic literal term. - For literals a datatype IRI can be specified. If no datatype IRI is specified then the default IRI is xsd.string . iri.hashed can be used instead of iri when the ETL has a high number of blank nodes and they need more than one constant as input to hash a unique IRI. etl.use( triple( iri.hashed(prefix.id, input_string), def.inhabitants, mw.toLiteral('Inhabitants', {datatype: xsd.positiveInteger})), ) Notice the following details: - input_string can pass more than one constant to hash a unique IRI term. Static and dynamic triples \u00b6 Be aware that there are different approaches forstatic anddynamic IRIs: Static IRIs are created with prefix declarations (example [1a]). Dynamic IRIs are created with iri() , iri.hashed and prefix declarations (example [2a]). [1a] prefix.id('person') [2a] iri(prefix.id, 'person'), [3a] iri.hashed(prefix.id, ['person','age']), Notation [1a] creates thestatic* IRI [1b]. This IRI does not depend on the currently processed record. Notation [2a] creates thedynamic* IRI in [2b], assuming the \"person\" key contains the value \"John\" . This IRI depends on the currently processed record. For a different record, IRI [2c] may be created instead (assuming the \"person\" key contains the value \"Jane\" ). Notation [3a] creates thedynamic* IRI in [3b], assuming the \"person\" key contains the value \"Sam\" and the \"age\" key contains the value \"30\" . For a different record, IRI [3c] may be created instead (assuming the \"person\" key contains the value \"Roland\" and \"age\" key contains the value \"20\" ). [1b] id:person [2b] id:John [2c] id:Jane [3b] id:Sam , age: 30 [3c] id:Sam , age: 20 --> When should you use an IRI instead of an URI literal? \u00b6 An IRI is used to identify something, for example the city of Amsterdam. It is expected that accessing it returns linked data. An IRI can be used to make assertions about a subject. On the other hand, a URI is expected to return a non-linked data content, for example an HTML website, and can be used as objects in linked data, for example for inserting further information about the subject resource. In the example below, the subject IRI is described further by the object's URL. rdfs:seeAlso \"https://www.iamsterdam.com\"^^xsd:anyURI. An IRI can be created with iri() , while an URI is created by using literal() . 
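As a small illustration of this difference, the following hedged sketch asserts a subject IRI together with a URI literal, using the same assertion style as the examples above. The 'city' and 'website' source keys and the use of rdfs.seeAlso are assumptions made for the sake of the example, and xsd.anyURI is assumed to be declared alongside the other xsd terms on this page:
triple(
  iri(prefix.id, 'city'),          // IRI: identifies the resource, so further assertions can be made about it
  rdfs.seeAlso,
  literal('website', xsd.anyURI),  // URI literal: merely points to a regular, non-linked-data web page
),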
Limitation of literal() and iri() \u00b6 There is a limitation for both literal() and iri() . It is not possible to change the value in the record in the literal() and iri() assertions. The value that is at that moment stored in the record for that key, is then added as either an IRI when called with the iri() function or as a literal when called with the function literal() . The limitation is shown in the example below. In the example we want to round the inhabitants number to the nearest thousand. We can not transform this in the literal() function. Instead we need to add a custom.change() middleware which will execute the transformation. custom.change({ key: 'Inhabitants', type: 'number', change: value => value / 1_000, }), triple( iri(prefix.id, 'Country'), def.name, literal('Inhabitants', xsd.positiveInteger) ),","title":"Static dynamic statements"},{"location":"triply-etl/tmp/static-dynamic-statements/#static-and-dynamic-statements","text":"","title":"Static and dynamic statements"},{"location":"triply-etl/tmp/static-dynamic-statements/#create-dynamic-statements","text":"Dynamic statements are statements that are based on some aspect of the source data. We use the following Record as an example: Country Inhabitants France null Germany 83190556 Netherlands 17650200 We start with creating the prefix and term declarations (see the Declare documentation for more information): const base = declarePrefix('https://triplydb.com/Triply/example/') const prefix = { def: declarePrefix(base('def/')), id: declarePrefix(base('id/')), xsd: declarePrefix('http://www.w3.org/2001/XMLSchema#'), } const def = { Country: prefix.def('Country'), name: prefix.def('inhabitants'), } const xsd = { positiveInteger: prefix.xsd('positiveInteger'), string: prefix.xsd('string'), } const input_string = ['Country', 'inhabitants'] With these prefix and term constants in place, a dynamic statement is created as follows: triple( iri(prefix.id, 'Country'), def.inhabitants, literal('Inhabitants', xsd.positiveInteger) ), Notice the following details: - iri() is used to create a dynamic IRI term. - Arguments Country and Inhabitants allow values for these keys to be used from processed Records. - The IRI prefix for the subject term is specified with constant prefix.id . - literal is used to create a dynamic literal term. - For literals a datatype IRI can be specified. If no datatype IRI is specified then the default IRI is xsd.string . iri.hashed can be used instead of iri when the ETL has a high number of blank nodes and they need more than one constant as input to hash a unique IRI. etl.use( triple( iri.hashed(prefix.id, input_string), def.inhabitants, mw.toLiteral('Inhabitants', {datatype: xsd.positiveInteger})), ) Notice the following details: - input_string can pass more than one constant to hash a unique IRI term.","title":"Create dynamic statements"},{"location":"triply-etl/tmp/static-dynamic-statements/#static-and-dynamic-triples","text":"Be aware that there are different approaches forstatic anddynamic IRIs: Static IRIs are created with prefix declarations (example [1a]). Dynamic IRIs are created with iri() , iri.hashed and prefix declarations (example [2a]). [1a] prefix.id('person') [2a] iri(prefix.id, 'person'), [3a] iri.hashed(prefix.id, ['person','age']), Notation [1a] creates thestatic* IRI [1b]. This IRI does not depend on the currently processed record. Notation [2a] creates thedynamic* IRI in [2b], assuming the \"person\" key contains the value \"John\" . This IRI depends on the currently processed record. 
For a different record, IRI [2c] may be created instead (assuming the \"person\" key contains the value \"Jane\" ). Notation [3a] creates thedynamic* IRI in [3b], assuming the \"person\" key contains the value \"Sam\" and the \"age\" key contains the value \"30\" . For a different record, IRI [3c] may be created instead (assuming the \"person\" key contains the value \"Roland\" and \"age\" key contains the value \"20\" ). [1b] id:person [2b] id:John [2c] id:Jane [3b] id:Sam , age: 30 [3c] id:Sam , age: 20 -->","title":"Static and dynamic triples"},{"location":"triply-etl/tmp/static-dynamic-statements/#when-should-you-use-an-iri-instead-of-an-uri-literal","text":"An IRI is used to identify something, for example the city of Amsterdam. It is expected that accessing it returns linked data. An IRI can be used to make assertions about a subject. On the other hand, a URI is expected to return a non-linked data content, for example an HTML website, and can be used as objects in linked data, for example for inserting further information about the subject resource. In the example below, the subject IRI is described further by the object's URL. rdfs:seeAlso \"https://www.iamsterdam.com\"^^xsd:anyURI. An IRI can be created with iri() , while an URI is created by using literal() .","title":"When should you use an IRI instead of an URI literal?"},{"location":"triply-etl/tmp/static-dynamic-statements/#limitation-of-literal-and-iri","text":"There is a limitation for both literal() and iri() . It is not possible to change the value in the record in the literal() and iri() assertions. The value that is at that moment stored in the record for that key, is then added as either an IRI when called with the iri() function or as a literal when called with the function literal() . The limitation is shown in the example below. In the example we want to round the inhabitants number to the nearest thousand. We can not transform this in the literal() function. Instead we need to add a custom.change() middleware which will execute the transformation. custom.change({ key: 'Inhabitants', type: 'number', change: value => value / 1_000, }), triple( iri(prefix.id, 'Country'), def.name, literal('Inhabitants', xsd.positiveInteger) ),","title":"Limitation of literal() and iri()"},{"location":"triply-etl/tmp/tmp/","text":"On this page: Create statements Create static statements Create dynamic statements Static and dynamic triples When should you use an IRI instead of an URI (which is a literal)? Limitation of literal, iri and iri.hashed Record IDs Process data conditionally Null values Missing values The empty string Custom functions Tree-shaped data Iterating over lists of objects Index key ($index) Parent key ($parent) Root key ($root) Iterating over lists of primitives Transforming RDF data Create statements \u00b6 After source data is connected and transformed, the RATT Record is ready to be transformed to linked data. Linked data statements are assertions or factual statements that consist of 3 terms (triple) or 4 terms (quadruples). Statements are created with the triple function. Calls to this function are part of the pipeline, and must appear inside the scope of etl.use . Create static statements \u00b6 Static linked data statements are statements that only make use of constant terms (see working with IRIs ). Constant terms are introduced at the beginning of a RATT pipeline, typically prior to the occurrence of the first etl.use scope. 
The following static statements make use of the constant terms introduced in the section on working with IRIs . etl.use( // \u201cJohn is a person.\u201d triple(ex.john, a, foaf.Person), // \u201cMary is a person.\u201d triple(ex.mary, a, foaf.Person), ) Create dynamic statements \u00b6 Dynamic statements are statements that are based on some aspect of the source data. We use the following RATT Record as an example: Country Inhabitants France null Germany 83190556 Netherlands 17650200 We start with creating the prefix and term declarations (see the section on working with IRIs for more information): const prefix_base = Ratt.prefixer('https://triplydb.com/Triply/example/') const prefix = { def: Ratt.prefixer(prefix_base('def/')), id: Ratt.prefixer(prefix_base('id/')), xsd: Ratt.prefixer('http://www.w3.org/2001/XMLSchema#'), } const def = { Country: prefix.def('Country'), name: prefix.def('inhabitants'), } const xsd = { positiveInteger: prefix.xsd('positiveInteger'), string: prefix.xsd('string'), } const input_string = ['Country', 'inhabitants'] With these prefix and term constants in place, a dynamic statement is created as follows: etl.use( triple( iri('Country', {prefix: prefix.id}), def.inhabitants, literal('Inhabitants', {datatype: xsd.positiveInteger})), ) Notice the following details: - iri is used to create a dynamic IRI term. - Arguments Country and Inhabitants allow values for these keys to be used from processed RATT Records. - The IRI prefix for the subject term is specified with constant prefix.id . - literal is used to create a dynamic literal term. - For literals a datatype IRI can be specified. If no datatype IRI is specified then the default IRI is xsd.string . iri.hashed can be used instead of iri when the ETL has a high number of blank nodes and they need more than one constant as input to hash a unique IRI. etl.use( triple( iri.hashed(prefix.id, input_string), def.inhabitants, mw.toLiteral('Inhabitants', {datatype: xsd.positiveInteger})), ) Notice the following details: - input_string can pass more than one constant to hash a unique IRI term. Static and dynamic triples \u00b6 Be aware that there are different approaches for static and dynamic IRIs: Static IRIs are created with prefix declarations (example [1a]). Dynamic IRIs are created with iri , iri.hashed and prefix declarations (example [2a]). [1a] prefix.id('person') [2a] iri(prefix.id, 'person'), [3a] iri.hashed(prefix.id, ['person','age']), Notation [1a] creates the static IRI [1b]. This IRI does not depend on the currently processed RATT record. Notation [2a] creates the dynamic IRI in [2b], assuming the \"person\" key contains the value \"John\" . This IRI depends on the currently processed RATT record. For a different RATT record, IRI [2c] may be created instead (assuming the \"person\" key contains the value \"Jane\" ). Notation [3a] creates the dynamic IRI in [3b], assuming the \"person\" key contains the value \"Sam\" and the \"age\" key contains the value \"30\" . For a different RATT record, IRI [3c] may be created instead (assuming the \"person\" key contains the value \"Roland\" and \"age\" key contains the value \"20\" ). [1b] id:person [2b] id:John [2c] id:Jane [3b] id:Sam , age: 30 [3c] id:Sam , age: 20 When should you use an IRI instead of an URI (which is a literal)? \u00b6 An IRI is used to identify something, for example the city of Amsterdam. It is expected that accessing it returns linked data. An IRI can be used to make assertions about a subject. 
On the other hand, a URI is expected to return a non-linked data content, for example an HTML website, and can be used as objects in linked data, for example for inserting further information about the subject resource. In the example below, the subject IRI is described further by the object's URL. rdfs:seeAlso \"https://www.iamsterdam.com\"^^xsd:anyURI. An IRI can be created with iri , while an URI is created by using literal . Limitation of literal , iri and iri.hashed \u00b6 There is a limitation for both literal , iri and iri.hashed . It is not possible to change the value in the record in the literal , iri and iri.hashed middlewares. The value that is at that moment stored in the record for that key, is then added as either an IRI when called with the iri / iri.hashed function or as a literal when called with the function literal . The limitation is shown in the example below. In the example we want to round the inhabitants number to the nearest thousand. We can not transform this in the literal function. Instead we need to add a change middleware which will execute the transformation. etl.use( change({ key: 'Inhabitants', type: 'number', change: (value) => value/1000 }), triple( iri(prefix.id, 'Country'), def.name, literal('Inhabitants', xsd.positiveInteger) ), ) Record IDs \u00b6 If your RATT Records do not contain a unique ID then you can use the recordId entry that RATT adds automatically. These recordId values are unique for every record processed in the same pipeline, but they are not an entry into the RATT Record by default. Record IDs are consistently assigned across runs of the same pipeline. They generate the same output as long as the input does not change. The following example code shows how the record ID can be added to each RATT Record: etl.use( add({ key: 'ID', value: context => app.prefix.observation(context.recordId.toString()) }), triple(iri(prefix.id, key_id), a, def.Country), ) Process data conditionally \u00b6 Source data often contains optional values. These are values that appear in some, but not all records. Source data often contains 'special' values to denote the absence of a value. Common examples are values such as 'NULL' or the empty string ( '' ) or 'clear' outliers such as 9999 for a missing year. We call such values \u2018null values\u2019. The when function supports the creation of triples under certain conditions. The first argument that this function takes establishes whether or not a certain condition is met. After that, one or more additional statement arguments appear that will only be called if the condition is satisfied. The generic structure of when is as follows: etl.use( when( '{condition}', '{statement-1}', '{statement-2}', '{statement-3}', ..., ) ) Notice that it is often useful to specify multiple statements under the same condition: The first statement transforms an optional value, and the second statement uses the transformed optional value to make a triple assertion. The first statement asserts one triple based on the optional value, and the second statement asserts a second triple based on the same optional value. Null values \u00b6 If a key contains a null value in some records, then we need to specifically identify the criteria under which a triple must be added. etl.use( // The source data uses '9999' to denote an unknown creation year. 
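// Only assert the dct:created triple when the value is a real year (i.e. not the 9999 marker).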
when( context => context.getNumber('CREATED') != 9999), triple( iri(prefix.id, 'ID'), dct.created, literal('CREATED', xsd.gYear))), Notice that the conditional function inside the when function takes the current RATT context as its single argument and returns a Boolean. Missing values \u00b6 If a value is sometimes completely missing from a source data record, then the following construct can be used to only add a triple in case the value is present: etl.use( // The source data does not always include a value for 'zipcode'. when( context => context.isNotEmpty('ZIPCODE'), triple( iri(prefix.id, 'ID'), def.zipcode, literal('ZIPCODE')), ..., ), ) Because missing values are very common in source data, RATT introduces special support for when the value for a specific key is missing. Instead of having to write context => context.isNotEmpty('foo') one can simply write the key name instead. The above example is equivalent to the following: etl.use( // The source data does not always include a value for 'zipcode'. when( 'ZIPCODE', triple( iri(prefix.id, 'ID'), def.zipcode, literal('ZIPCODE')), ..., ), ) It is also possible to check if a value is completely missing from the source data with ctx.isEmpty() A note for finding more methods RATT: One of the many advantages using Typescript is code completion. As such any methods available on a class in Ratt can be accessed using your IDE's intellisense ( ctrl + space in VSCODE). In Ratt the context and mw are two such classes that can be accessed in this way. The empty string \u00b6 Because source data often uses the empty string to signify NULL values, this particular string is treated in a special way by RATT. etl.use( when( key.zipcode, // Skipped for the empty string. ...), ) Notice that it is almost never useful to store the empty string in linked data. So the treatment of the empty string as a NULL value is the correct default behavior. Custom functions \u00b6 If we want to extract a string value from the source data, we can write a custom function which can be used with when . when can receive two parameters: string(a key value) or a function. If when receives a string, it checks whether it is empty or not. But in case of a custom method specific instructions are required. For example, (ctx) => ctx.isNotEmpty('foo') && ctx.getString('foo') === 'foo' Notice details: ctx.isNotEmpty('foo') checks whether the string is empty or not and only if it is not empty, the function moves to the next step ctx.getString('bla') === 'something\u2019 , which is the next step, extracts 'foo' when it fulfills the required criteria Tree-shaped data \u00b6 Tree-shaped data is very common in different source systems. We will use the following JSON source data as an example in this section: { \"metadata\": { \"title\": { \"name\": \"Data about countries.\" } }, \"data\": { \"countries\": [ { \"id\": \"nl\", \"name\": \"The Netherlands\" }, { \"id\": \"de\", \"name\": \"Germany\" } ] } } The principles that are documented in this section can be applied to any form of tree-shaped data. For example, the following XML snippet is very similar to the JSON example: <name>Data about countries.</name> nl The Netherlands de Germany Iterating over lists of objects \u00b6 In the previous section, we saw that we were able to assert the name of the first country and the name of the second country. But what do we do if we want to assert the name for every country in the world? 
And what do we do if some countries have a name in 2 languages, but other countries have a name in 1 or 3 languages? What we need is a simple way to express that we want RATT to make an assertion for every element in a list. RATT uses the forEach function for this purpose. The following code snippet asserts the name for each country in the example data: etl.use( forEach('data.countries', triple( iri(prefix.country, 'id'), rdfs.label, literal('name', 'en'))), ) Notice the following details: - forEach uses the path expression 'data.countries' to identify the list. - Inside the forEach function, each element in the list is made available separately. - This allows the 'id' and 'name' keys to be identified directly. The above code snippet makes one assertion for every element in the \"countries\" list: country:nl rdfs:label 'The Netherlands'@en. country:de rdfs:label 'Germany'@en. Notice that forEach only works for lists whose elements are objects . See Iterating over lists of primitives for dealing with lists that do not contain objects. The elements that forEach iterates over are themselves RATT records. This implies that all functions that work for full RATT records also work for the RATT records inside forEach . The RATT records inside an forEach function are smaller. This allows the regular keys of the iterated-over elements to be accessed directly. In addition to these regular keys, RATT records inside forEach also contain additional keys that simplify common operations. The following subsections explain the following special keys: Index key ( $index ) Parent key ( $parent ) Root key ( $root ) Index key ( $index ) \u00b6 Each RATT record that is made available in forEach contains the $index key. The value of this key is the index of the element in the list. This is the same index that is used to access specific elements in an list, as explained in the section on accessing lists by index . The index key is often useful for assigning a unique subject IRI to every element. Suppose we have the following source data. We do not want to use the values of the \"name\" key for our subject IRI, because these names contain spaces and possibly other problematic characters that make the IRI more difficult to read and use. { \"countries\": [ { \"name\": \"The Netherlands\" }, { \"name\": \"Germany\" }, { \"name\": \"Italy\" }, \u2026 ] } The following code snippet uses the $index key that is made available inside forEach in order to create a unique subject IRI for each country: etl.use( forEach('countries', triple( iri(prefix.country, '$index'), rdfs.label, literal('name', 'en'))), ) This results in the following assertions: country:0 rdfs:label 'The Netherlands'@en. country:1 rdfs:label 'Germany'@en. country:2 rdfs:label 'Italy'@en. Parent key ( $parent ) \u00b6 When forEach iterates through a list of elements, it makes the enclosing parent record available under key $parent . The parent record is the record that directly contains the first key that appears in the path that was specified in forEach . 
For example, the parent record in the following call is the record that directly contains the \"data\" key: etl.use( forEach('data.countries', \u2026 ) ) The $parent key can be observed when logRecord` is used to print the iterated-over elements to the terminal: etl.use( forEach('data.countries', logRecord()) ) For our example source data, this emits the following 2 RATT records: { \"id\": \"en\", \"name\": \"The Netherlands\", \"$index\": 0, \"$parent\": { \"data\": { \"labels\": [ { \"id\": \"en\", \"name\": \"The Netherlands\", }, { \"id\": \"de\" \"name\": \"Germany\", } ] } }, \"$root\": \"__circular__\" } and: { \"id\": \"de\", \"name\": \"Germany\", \"$index\": 1, \"$parent\": { \"data\": { \"labels\": [ { \"id\": \"en\", \"name\": \"The Netherlands\", }, { \"id\": \"de\" \"name\": \"Germany\", } ] } }, \"$root\": \"__circular__\" } The $root key is explained in the next section . Root key ( $root ) \u00b6 Sometimes it may be necessary to access a part of the original RATT record that is outside of the scope of the forEach call. Every RATT record inside a forEach call contains the \"$root\" key. The value of the root key provides a link to the full RATT record. Because the $root key is part of the linked-to RATT record, it is not possible to print the value of the root key. (This would result in infinite output.) For this reason, the value of the $root key is printed as the special value \"__circular__\" . For the above examples, the parent record and root record are the same, but this is not always the case. Specifically, the parent record and root record are different when forEach calls are nested. The following data contains an inner list (key \"labels\" ) inside an outer list ( \"countries\" ): { \"data\": { \"countries\": [ { \"id\": \"NL\", \"labels\": [ { \"name\": \"The Netherlands\", \"locale\": \"en-us\" }, { \"name\": \"Nederland\", \"locale\": \"nl-nl\" } ] }, { \"id\": \"EN\", \"labels\": [ { \"name\": \"England\", \"locale\": \"en-gb\" } ] } ] } } The following nested forEach call shows the difference between the \"$parent\" key and the $root key. The $parent key allows the individual country objects to be accessed, while the \"$root\" key allows the entire tree to be accessed: etl.use( forEach('data.countries', forEach('labels', logRecord())), ) The following RATT record is printed first (3 records are printed in total). Notice that the value of the outer $parent and \"$root\" keys are now different: - The $parent key allows access to the first country. - The $root key allows access to the full record (describing multiple countries). { \"name\": \"The Netherlands\", \"locale\": \"en-us\", \"$index\": 0, \"$parent\": { \"id\": \"NL\", \"labels\": [ { \"name\": \"The Netherlands\", \"locale\": \"en-us\" }, { \"name\": \"Nederland\", \"locale\": \"nl-nl\" } ], \"$index\": 0, \"$parent\": { \"data\": { \"countries\": [ { \"id\": \"NL\", \"labels\": \"__circular__\" }, { \"id\": \"EN\", \"labels\": [ { \"name\": \"England\", \"locale\": \"en-gb\" } ] } ] } }, \"$root\": \"__circular__\" }, \"$root\": \"__circular__\" } Iterating over lists of primitives \u00b6 In the previous section we showed how to iterate over lists of objects. But what happens if a list does not contain objects but elements of primitive type? Examples include lists of strings or lists of numbers. Function forEach does not work with lists containing primitive types, because it assumes a RATT record structure which can only be provided by objects. 
Luckily, RATT includes the functions iri.forEach and literal.forEach that can be specifically used to iterate over lists of primitives. etl.use( fromJson({\"id\": \"nl\", \"names\": [\"The Netherlands\", \"Holland\"]}), triple( iri(prefix.country, 'id'), rdfs.label, literal.forEach('names', 'en')), ) This makes the following assertion: country:nl rdfs:label 'The Netherlands'@en, 'Holland'@en. Transforming RDF data \u00b6 If you have RDF data that does not need to be transformed, see copying source data . If you have RDF data that does need to be transformed, you can use the following pattern. This example renames the graph. const app = new Ratt({ defaultGraph: graph.model, prefixes: prefix, sources: { inputFile: Ratt.Source.file(`data/shapes.trig`) }, destinations: { dataset: Ratt.Destination.TriplyDb.rdf(organization, dataset, remoteOptions) }, }) etl.use( loadRdf(app.sources.inputFile), mapQuads( (quad, ctx) => ctx.store.quad( quad.subject, quad.predicate, quad.object, app.prefix.somePrefix(\"graph\") ) ), toRdf(app.destinations.dataset) ) Similarly, you can change all the subject, predicates or objects in your data. Also, you can choose to transform triples of a specific subject, predicate, object or graph name. in this case, you should use: mapQuads( (quad, ctx) => ctx.store.quad( quad.subject, app.prefix.example('new-predicate'), quad.object, quad.graph ), {predicate: app.prefix.example(\"old-predicate\")} )","title":"Tmp"},{"location":"triply-etl/tmp/tmp/#create-statements","text":"After source data is connected and transformed, the RATT Record is ready to be transformed to linked data. Linked data statements are assertions or factual statements that consist of 3 terms (triple) or 4 terms (quadruples). Statements are created with the triple function. Calls to this function are part of the pipeline, and must appear inside the scope of etl.use .","title":"Create statements"},{"location":"triply-etl/tmp/tmp/#static-assertions","text":"Static linked data statements are statements that only make use of constant terms (see working with IRIs ). Constant terms are introduced at the beginning of a RATT pipeline, typically prior to the occurrence of the first etl.use scope. The following static statements make use of the constant terms introduced in the section on working with IRIs . etl.use( // \u201cJohn is a person.\u201d triple(ex.john, a, foaf.Person), // \u201cMary is a person.\u201d triple(ex.mary, a, foaf.Person), )","title":"Create static statements"},{"location":"triply-etl/tmp/tmp/#dynamic-assertions","text":"Dynamic statements are statements that are based on some aspect of the source data. 
We use the following RATT Record as an example: Country Inhabitants France null Germany 83190556 Netherlands 17650200 We start with creating the prefix and term declarations (see the section on working with IRIs for more information): const prefix_base = Ratt.prefixer('https://triplydb.com/Triply/example/') const prefix = { def: Ratt.prefixer(prefix_base('def/')), id: Ratt.prefixer(prefix_base('id/')), xsd: Ratt.prefixer('http://www.w3.org/2001/XMLSchema#'), } const def = { Country: prefix.def('Country'), name: prefix.def('inhabitants'), } const xsd = { positiveInteger: prefix.xsd('positiveInteger'), string: prefix.xsd('string'), } const input_string = ['Country', 'inhabitants'] With these prefix and term constants in place, a dynamic statement is created as follows: etl.use( triple( iri('Country', {prefix: prefix.id}), def.inhabitants, literal('Inhabitants', {datatype: xsd.positiveInteger})), ) Notice the following details: - iri is used to create a dynamic IRI term. - Arguments Country and Inhabitants allow values for these keys to be used from processed RATT Records. - The IRI prefix for the subject term is specified with constant prefix.id . - literal is used to create a dynamic literal term. - For literals a datatype IRI can be specified. If no datatype IRI is specified then the default IRI is xsd.string . iri.hashed can be used instead of iri when the ETL has a high number of blank nodes and they need more than one constant as input to hash a unique IRI. etl.use( triple( iri.hashed(prefix.id, input_string), def.inhabitants, mw.toLiteral('Inhabitants', {datatype: xsd.positiveInteger})), ) Notice the following details: - input_string can pass more than one constant to hash a unique IRI term.","title":"Create dynamic statements"},{"location":"triply-etl/tmp/tmp/#static-and-dynamic-triples","text":"Be aware that there are different approaches for static and dynamic IRIs: Static IRIs are created with prefix declarations (example [1a]). Dynamic IRIs are created with iri , iri.hashed and prefix declarations (example [2a]). [1a] prefix.id('person') [2a] iri(prefix.id, 'person'), [3a] iri.hashed(prefix.id, ['person','age']), Notation [1a] creates the static IRI [1b]. This IRI does not depend on the currently processed RATT record. Notation [2a] creates the dynamic IRI in [2b], assuming the \"person\" key contains the value \"John\" . This IRI depends on the currently processed RATT record. For a different RATT record, IRI [2c] may be created instead (assuming the \"person\" key contains the value \"Jane\" ). Notation [3a] creates the dynamic IRI in [3b], assuming the \"person\" key contains the value \"Sam\" and the \"age\" key contains the value \"30\" . For a different RATT record, IRI [3c] may be created instead (assuming the \"person\" key contains the value \"Roland\" and \"age\" key contains the value \"20\" ). [1b] id:person [2b] id:John [2c] id:Jane [3b] id:Sam , age: 30 [3c] id:Sam , age: 20","title":"Static and dynamic triples"},{"location":"triply-etl/tmp/tmp/#when-should-you-use-an-iri-instead-of-an-uri-which-is-a-literal","text":"An IRI is used to identify something, for example the city of Amsterdam. It is expected that accessing it returns linked data. An IRI can be used to make assertions about a subject. On the other hand, a URI is expected to return a non-linked data content, for example an HTML website, and can be used as objects in linked data, for example for inserting further information about the subject resource. 
In the example below, the subject IRI is described further by the object's URL. rdfs:seeAlso \"https://www.iamsterdam.com\"^^xsd:anyURI. An IRI can be created with iri , while an URI is created by using literal .","title":"When should you use an IRI instead of an URI (which is a literal)?"},{"location":"triply-etl/tmp/tmp/#limitation-of-literal-iri-and-irihashed","text":"There is a limitation for both literal , iri and iri.hashed . It is not possible to change the value in the record in the literal , iri and iri.hashed middlewares. The value that is at that moment stored in the record for that key, is then added as either an IRI when called with the iri / iri.hashed function or as a literal when called with the function literal . The limitation is shown in the example below. In the example we want to round the inhabitants number to the nearest thousand. We can not transform this in the literal function. Instead we need to add a change middleware which will execute the transformation. etl.use( change({ key: 'Inhabitants', type: 'number', change: (value) => value/1000 }), triple( iri(prefix.id, 'Country'), def.name, literal('Inhabitants', xsd.positiveInteger) ), )","title":"Limitation of literal, iri and iri.hashed"},{"location":"triply-etl/tmp/tmp/#record-ids","text":"If your RATT Records do not contain a unique ID then you can use the recordId entry that RATT adds automatically. These recordId values are unique for every record processed in the same pipeline, but they are not an entry into the RATT Record by default. Record IDs are consistently assigned across runs of the same pipeline. They generate the same output as long as the input does not change. The following example code shows how the record ID can be added to each RATT Record: etl.use( add({ key: 'ID', value: context => app.prefix.observation(context.recordId.toString()) }), triple(iri(prefix.id, key_id), a, def.Country), )","title":"Record IDs"},{"location":"triply-etl/tmp/tmp/#process-data-conditionally","text":"Source data often contains optional values. These are values that appear in some, but not all records. Source data often contains 'special' values to denote the absence of a value. Common examples are values such as 'NULL' or the empty string ( '' ) or 'clear' outliers such as 9999 for a missing year. We call such values \u2018null values\u2019. The when function supports the creation of triples under certain conditions. The first argument that this function takes establishes whether or not a certain condition is met. After that, one or more additional statement arguments appear that will only be called if the condition is satisfied. The generic structure of when is as follows: etl.use( when( '{condition}', '{statement-1}', '{statement-2}', '{statement-3}', ..., ) ) Notice that it is often useful to specify multiple statements under the same condition: The first statement transforms an optional value, and the second statement uses the transformed optional value to make a triple assertion. The first statement asserts one triple based on the optional value, and the second statement asserts a second triple based on the same optional value.","title":"Process data conditionally"},{"location":"triply-etl/tmp/tmp/#null-values","text":"If a key contains a null value in some records, then we need to specifically identify the criteria under which a triple must be added. etl.use( // The source data uses '9999' to denote an unknown creation year. 
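// Only assert the dct:created triple when the value is a real year (i.e. not the 9999 marker).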
when( context => context.getNumber('CREATED') != 9999, triple( iri(prefix.id, 'ID'), dct.created, literal('CREATED', xsd.gYear))), ) Notice that the conditional function inside the when function takes the current RATT context as its single argument and returns a Boolean.","title":"Null values"},{"location":"triply-etl/tmp/tmp/#missing-values","text":"If a value is sometimes completely missing from a source data record, then the following construct can be used to only add a triple in case the value is present: etl.use( // The source data does not always include a value for 'zipcode'. when( context => context.isNotEmpty('ZIPCODE'), triple( iri(prefix.id, 'ID'), def.zipcode, literal('ZIPCODE')), ..., ), ) Because missing values are very common in source data, RATT introduces special support for when the value for a specific key is missing. Instead of having to write context => context.isNotEmpty('foo') one can simply write the key name instead. The above example is equivalent to the following: etl.use( // The source data does not always include a value for 'zipcode'. when( 'ZIPCODE', triple( iri(prefix.id, 'ID'), def.zipcode, literal('ZIPCODE')), ..., ), ) It is also possible to check whether a value is completely missing from the source data with ctx.isEmpty() . A note on finding more RATT methods: one of the many advantages of using TypeScript is code completion. Any method that is available on a RATT class can be discovered with your IDE's IntelliSense ( Ctrl + Space in VS Code). In RATT, context and mw are two such classes that can be explored in this way.","title":"Missing values"},{"location":"triply-etl/tmp/tmp/#the-empty-string","text":"Because source data often uses the empty string to signify NULL values, this particular string is treated in a special way by RATT. etl.use( when( key.zipcode, // Skipped for the empty string. ...), ) Notice that it is almost never useful to store the empty string in linked data. So the treatment of the empty string as a NULL value is the correct default behavior.","title":"The empty string"},{"location":"triply-etl/tmp/tmp/#custom-functions","text":"If we want to extract a string value from the source data, we can write a custom function which can be used with when . The first argument of when is either a string (a key name) or a function. If when receives a string, it only checks whether the value for that key is empty or not. If a more specific condition is needed, a custom function must be supplied. For example: (ctx) => ctx.isNotEmpty('foo') && ctx.getString('foo') === 'foo' Notice the following details: ctx.isNotEmpty('foo') checks whether the value for key 'foo' is non-empty; only if it is non-empty does evaluation move on to the next step. ctx.getString('foo') === 'foo' , the next step, checks whether the string value stored under key 'foo' is equal to 'foo' .","title":"Custom functions"},{"location":"triply-etl/tmp/tmp/#tree-shaped-data","text":"Tree-shaped data is very common in different source systems. We will use the following JSON source data as an example in this section: { \"metadata\": { \"title\": { \"name\": \"Data about countries.\" } }, \"data\": { \"countries\": [ { \"id\": \"nl\", \"name\": \"The Netherlands\" }, { \"id\": \"de\", \"name\": \"Germany\" } ] } } The principles that are documented in this section can be applied to any form of tree-shaped data.
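As a minimal, hedged sketch (assuming the prefix.country and rdfs constants that are used elsewhere in this documentation), individual values in this tree can be addressed directly with path expressions, for example to assert the names of the first and the second country:

etl.use(
  // Assert the name of the first country in the list (index 0).
  triple(
    iri(prefix.country, 'data.countries[0].id'),
    rdfs.label,
    literal('data.countries[0].name', 'en')),
  // Assert the name of the second country in the list (index 1).
  triple(
    iri(prefix.country, 'data.countries[1].id'),
    rdfs.label,
    literal('data.countries[1].name', 'en')),
)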
For example, the following XML snippet is very similar to the JSON example: <name>Data about countries.</name> nl The Netherlands de Germany ","title":"Tree-shaped data"},{"location":"triply-etl/tmp/tmp/#iterating-over-lists-of-objects","text":"In the previous section, we saw that we were able to assert the name of the first country and the name of the second country. But what do we do if we want to assert the name for every country in the world? And what do we do if some countries have a name in 2 languages, but other countries have a name in 1 or 3 languages? What we need is a simple way to express that we want RATT to make an assertion for every element in a list. RATT uses the forEach function for this purpose. The following code snippet asserts the name for each country in the example data: etl.use( forEach('data.countries', triple( iri(prefix.country, 'id'), rdfs.label, literal('name', 'en'))), ) Notice the following details: - forEach uses the path expression 'data.countries' to identify the list. - Inside the forEach function, each element in the list is made available separately. - This allows the 'id' and 'name' keys to be identified directly. The above code snippet makes one assertion for every element in the \"countries\" list: country:nl rdfs:label 'The Netherlands'@en. country:de rdfs:label 'Germany'@en. Notice that forEach only works for lists whose elements are objects . See Iterating over lists of primitives for dealing with lists that do not contain objects. The elements that forEach iterates over are themselves RATT records. This implies that all functions that work for full RATT records also work for the RATT records inside forEach . The RATT records inside an forEach function are smaller. This allows the regular keys of the iterated-over elements to be accessed directly. In addition to these regular keys, RATT records inside forEach also contain additional keys that simplify common operations. The following subsections explain the following special keys: Index key ( $index ) Parent key ( $parent ) Root key ( $root )","title":"Iterating over lists of objects"},{"location":"triply-etl/tmp/tmp/#index-key-index","text":"Each RATT record that is made available in forEach contains the $index key. The value of this key is the index of the element in the list. This is the same index that is used to access specific elements in an list, as explained in the section on accessing lists by index . The index key is often useful for assigning a unique subject IRI to every element. Suppose we have the following source data. We do not want to use the values of the \"name\" key for our subject IRI, because these names contain spaces and possibly other problematic characters that make the IRI more difficult to read and use. { \"countries\": [ { \"name\": \"The Netherlands\" }, { \"name\": \"Germany\" }, { \"name\": \"Italy\" }, \u2026 ] } The following code snippet uses the $index key that is made available inside forEach in order to create a unique subject IRI for each country: etl.use( forEach('countries', triple( iri(prefix.country, '$index'), rdfs.label, literal('name', 'en'))), ) This results in the following assertions: country:0 rdfs:label 'The Netherlands'@en. country:1 rdfs:label 'Germany'@en. country:2 rdfs:label 'Italy'@en.","title":"Index key ($index)"},{"location":"triply-etl/tmp/tmp/#parent-key-parent","text":"When forEach iterates through a list of elements, it makes the enclosing parent record available under key $parent . 
The parent record is the record that directly contains the first key that appears in the path that was specified in forEach . For example, the parent record in the following call is the record that directly contains the \"data\" key: etl.use( forEach('data.countries', \u2026 ) ) The $parent key can be observed when logRecord` is used to print the iterated-over elements to the terminal: etl.use( forEach('data.countries', logRecord()) ) For our example source data, this emits the following 2 RATT records: { \"id\": \"en\", \"name\": \"The Netherlands\", \"$index\": 0, \"$parent\": { \"data\": { \"labels\": [ { \"id\": \"en\", \"name\": \"The Netherlands\", }, { \"id\": \"de\" \"name\": \"Germany\", } ] } }, \"$root\": \"__circular__\" } and: { \"id\": \"de\", \"name\": \"Germany\", \"$index\": 1, \"$parent\": { \"data\": { \"labels\": [ { \"id\": \"en\", \"name\": \"The Netherlands\", }, { \"id\": \"de\" \"name\": \"Germany\", } ] } }, \"$root\": \"__circular__\" } The $root key is explained in the next section .","title":"Parent key ($parent)"},{"location":"triply-etl/tmp/tmp/#root-key-root","text":"Sometimes it may be necessary to access a part of the original RATT record that is outside of the scope of the forEach call. Every RATT record inside a forEach call contains the \"$root\" key. The value of the root key provides a link to the full RATT record. Because the $root key is part of the linked-to RATT record, it is not possible to print the value of the root key. (This would result in infinite output.) For this reason, the value of the $root key is printed as the special value \"__circular__\" . For the above examples, the parent record and root record are the same, but this is not always the case. Specifically, the parent record and root record are different when forEach calls are nested. The following data contains an inner list (key \"labels\" ) inside an outer list ( \"countries\" ): { \"data\": { \"countries\": [ { \"id\": \"NL\", \"labels\": [ { \"name\": \"The Netherlands\", \"locale\": \"en-us\" }, { \"name\": \"Nederland\", \"locale\": \"nl-nl\" } ] }, { \"id\": \"EN\", \"labels\": [ { \"name\": \"England\", \"locale\": \"en-gb\" } ] } ] } } The following nested forEach call shows the difference between the \"$parent\" key and the $root key. The $parent key allows the individual country objects to be accessed, while the \"$root\" key allows the entire tree to be accessed: etl.use( forEach('data.countries', forEach('labels', logRecord())), ) The following RATT record is printed first (3 records are printed in total). Notice that the value of the outer $parent and \"$root\" keys are now different: - The $parent key allows access to the first country. - The $root key allows access to the full record (describing multiple countries). { \"name\": \"The Netherlands\", \"locale\": \"en-us\", \"$index\": 0, \"$parent\": { \"id\": \"NL\", \"labels\": [ { \"name\": \"The Netherlands\", \"locale\": \"en-us\" }, { \"name\": \"Nederland\", \"locale\": \"nl-nl\" } ], \"$index\": 0, \"$parent\": { \"data\": { \"countries\": [ { \"id\": \"NL\", \"labels\": \"__circular__\" }, { \"id\": \"EN\", \"labels\": [ { \"name\": \"England\", \"locale\": \"en-gb\" } ] } ] } }, \"$root\": \"__circular__\" }, \"$root\": \"__circular__\" }","title":"Root key ($root)"},{"location":"triply-etl/tmp/tmp/#iterating-over-lists-of-primitives","text":"In the previous section we showed how to iterate over lists of objects. But what happens if a list does not contain objects but elements of primitive type? 
Examples include lists of strings or lists of numbers. Function forEach does not work with lists containing primitive types, because it assumes a RATT record structure which can only be provided by objects. Luckily, RATT includes the functions iri.forEach and literal.forEach that can be specifically used to iterate over lists of primitives. etl.use( fromJson({\"id\": \"nl\", \"names\": [\"The Netherlands\", \"Holland\"]}), triple( iri(prefix.country, 'id'), rdfs.label, literal.forEach('names', 'en')), ) This makes the following assertion: country:nl rdfs:label 'The Netherlands'@en, 'Holland'@en.","title":"Iterating over lists of primitives"},{"location":"triply-etl/tmp/tmp/#transforming-rdf-data","text":"If you have RDF data that does not need to be transformed, see copying source data . If you have RDF data that does need to be transformed, you can use the following pattern. This example renames the graph. const app = new Ratt({ defaultGraph: graph.model, prefixes: prefix, sources: { inputFile: Ratt.Source.file(`data/shapes.trig`) }, destinations: { dataset: Ratt.Destination.TriplyDb.rdf(organization, dataset, remoteOptions) }, }) etl.use( loadRdf(app.sources.inputFile), mapQuads( (quad, ctx) => ctx.store.quad( quad.subject, quad.predicate, quad.object, app.prefix.somePrefix(\"graph\") ) ), toRdf(app.destinations.dataset) ) Similarly, you can change all the subject, predicates or objects in your data. Also, you can choose to transform triples of a specific subject, predicate, object or graph name. in this case, you should use: mapQuads( (quad, ctx) => ctx.store.quad( quad.subject, app.prefix.example('new-predicate'), quad.object, quad.graph ), {predicate: app.prefix.example(\"old-predicate\")} )","title":"Transforming RDF data"},{"location":"triply-etl/transform/","text":"On this page: Transform Next steps Transform \u00b6 The transform step makes changes to the record : graph LR sources -- 1. Extract --> record record -- 2. Transform --> record record -- 3. Assert --> ld ld -- 4. Enrich --> ld ld -- 5. Validate --> ld ld -- 6. Publish --> destinations linkStyle 1 stroke:red,stroke-width:3px; destinations[(\"D. Destinations\\n(TriplyDB)\")] ld[C. Internal Store] record[B. Record] sources[A. Data Sources] If you do not have a stream of records yet, read the documentation for the Extract step first. Once you have a stream of records, the following transformations are typically needed: Values need to be mapped onto a prepared list of IRIs or literals (e.g. from country names to country-denoting IRIs). Values need to be translated into standards-compliant formats (e.g., from country name to ISO 3166 country codes). Multiple existing values need to be combined into one new value (e.g., street name and house number may be combined into an address). A single value needs to be split into multiple values (e.g., from 'apple, orange' to 'apple' and 'orange' ). Values need to be cleaned because they are dirty in the source (e.g., from '001 ' to 1 ). TriplyETL supports the following four transformation approaches: RATT transformations are a set of commonly used transformation functions that are developed and maintained by Triply. RML can be used to convert non-RDF data into RDF triples. TypeScript can be used to write new customer transformations. XSLT used to transform XML data through the definition of transformation rules in XSLT stylesheets. Next steps \u00b6 The Transform step results in a cleaned and enriched record. 
The following link documents how you can use the record to make linked data assertions: Step 3. Assert : uses data from the record to make linked data assertions in the internal store.","title":"Overview"},{"location":"triply-etl/transform/#transform","text":"The transform step makes changes to the record : graph LR sources -- 1. Extract --> record record -- 2. Transform --> record record -- 3. Assert --> ld ld -- 4. Enrich --> ld ld -- 5. Validate --> ld ld -- 6. Publish --> destinations linkStyle 1 stroke:red,stroke-width:3px; destinations[(\"D. Destinations\\n(TriplyDB)\")] ld[C. Internal Store] record[B. Record] sources[A. Data Sources] If you do not have a stream of records yet, read the documentation for the Extract step first. Once you have a stream of records, the following transformations are typically needed: Values need to be mapped onto a prepared list of IRIs or literals (e.g. from country names to country-denoting IRIs). Values need to be translated into standards-compliant formats (e.g., from country name to ISO 3166 country codes). Multiple existing values need to be combined into one new value (e.g., street name and house number may be combined into an address). A single value needs to be split into multiple values (e.g., from 'apple, orange' to 'apple' and 'orange' ). Values need to be cleaned because they are dirty in the source (e.g., from '001 ' to 1 ). TriplyETL supports the following four transformation approaches: RATT transformations are a set of commonly used transformation functions that are developed and maintained by Triply. RML can be used to convert non-RDF data into RDF triples. TypeScript can be used to write new customer transformations. XSLT used to transform XML data through the definition of transformation rules in XSLT stylesheets.","title":"Transform"},{"location":"triply-etl/transform/#next-steps","text":"The Transform step results in a cleaned and enriched record. The following link documents how you can use the record to make linked data assertions: Step 3. Assert : uses data from the record to make linked data assertions in the internal store.","title":"Next steps"},{"location":"triply-etl/transform/ratt/","text":"On this page: RATT transformations Overview addHashedIri() Signature Parameters When to use? Example: lazy identifiers Example: dynamic IRI prefix Example: statement reification addIri() Signature Parameters When to use? See also Example: Prefix declaration and local name Example: Absolute IRI addLiteral() When to use Parameters See also Example: Typed literal Example: String literal Example: Language-tagged string Example: Validate usage addSkolemIri() See also Example addTag() Parameters Throws See also Example addValue() Description Parameters Example Example capitalize() Parameters Example: Class IRIs concat() Description Parameters Example copy() Parameters Example Example encodeHtml() Description Parameters Example geojsonToWkt() Parameters GeoJSON and Well-Known Text (WKT) See also Example jpath() Description Use cases Parameters Example lowercase() Description Use cases Parameters Example padEnd() Description Use cases Parameters Example padStart() Description Use cases Example: Fixed-length identifiers Example: Create year literals replace() Description Parameters Example split() Description Whitespace handling Empty values Use cases Parameters Example: Multiple values in singular table cells Example: Split a complex string into components substring() Description Parameters Example translateAll() Description When to use? 
Parameters Example: Map source data to IRI values Example: Map source data to IRI properties translateSome() Description Parameters Use cases Example tryLiteral() Description Throws Example See also uppercase() Description Parameters Example wkt.addPoint() Description Parameters Example wkt.project() Description Parameters Example RATT transformations \u00b6 RATT transformations are a core set of functions that are commonly used to change the content of TriplyETL Records. RATT transformations started out as TypeScript transformations that turned out to be useful in a wide variety of TriplyETL pipelines. Triply maintains this core set of transformation functions to allow new ETLs to make use of off-the-shelf functionality that has proven useful in the past. Overview \u00b6 The following transformation functions are currently available: Function Description addHashedIri() Creates an IRI with a content-based hash as the local name. addIri() Create a new IRI based on a prefix and a local name. addLiteral() Create a new literal based on a lexical for and a datatype IRI or language tag. addSkolemIri() Create a new IRI with a random local name, which advertises that it may be consistently replaced with blank nodes. addTag() Create a language tag. addValue() Create a TypeScript value. capitalize() Transforms a string value to its capitalized variant. concat() Combine multiple strings into a new string. copy() Copy a value from an old into a new key. decodeHtml() Decode HTML entities that occur in strings. geojsonToWkt() Change GeoJSON strings to WKT strings. jpath() Uses the JSONPath query language to select a value from the record. lowercase() Change strings to their lowercase variants. padEnd() Pad the end of strings. padStart() Pad the start of strings. replace() Replace part of a string. split() Split a string into multiple substrings. substring() Extract a substring from a string. translateAll() Translate all string values to other values. translateSome() Translate some string values to other strings. tryLiteral() Create literals for which the datatype is not know beforehand. uppercase() Change a string to its uppercase variant. wkt.addPoint() Add a geospatial point using the Well-Known Text (WKT) format. wkt.project() Change the projection of a Well-Known Text (WKT) literal from from Coordinate Reference System into another. addHashedIri() \u00b6 Creates an IRI with a content-based hash as the local name. Signature \u00b6 This function has the following signature: addHashedIri({ prefix, content, key }) Parameters \u00b6 prefix is a dynamic or static IRI. content is an array with static value, or a key that contains a dynamic arrays of values. key is a new key where the created IRI is stored. When to use? \u00b6 This function is used under the following circumstances: Something must be identified with an IRI. The thing that must be identified does not have a readily available identifier. The thing that must be identified does have properties whose combination of values is unique, and can therefore act as a composite identifier. This is called a composite key in database theory. The composed URL is not allowed to be skolemised anymore, e.g. the prefix cannot be an IRI where the pathname starts with /.well-known/genid/ . For this purpose, use transformation addskolemiri() instead. Benefits: The created IRIs are the same across different ETL runs over the same source data. This supports important use cases like change managements / versioning and graph signing. 
Downsides: It can take a lot of time to figure out which set of properties make every IRI unique. In database theory this process is known as composite key detection . Furthermore, keeping the list of identifying properties up-to-date over time poses a maintenance burden. Example: lazy identifiers \u00b6 Some source data does not include good identifiers for all data items. For example, the following source table contains first names and last names of persons, but neither property is by itself unique: First name Last name Jane Doe Jane Smith John Doe In such cases it may be an option to take a combination of columns, and use that combined sequence of columns for identification. The following snippet uses the combination of the first name and last name fields (in that order) to create a locally unique hash. Together with an IRI prefix, this can be used to create globally unique IRIs: fromJson([{ 'First name': 'John', 'Last name': 'Doe' }]), addHashedIri({ prefix: prefix.person, content: ['First name', 'Last name'], key: '_person', }), pairs('_person', [a, sdo.Person], [sdo.givenName, 'First name'], [sdo.familyName, 'Last name'], ), This results in the following linked data: person:70020947bea6c39cccea20d27e30fbdf a sdo:Person; sdo:givenName 'John'; familyName 'Doe'. Or diagrammatically: graph LR person -- a --> Person person -- sdo:givenName --> john person -- sdo:familyName --> doe Person[sdo:Person]:::model doe['Doe']:::data john['John']:::data person([person:70020947bea6c39cccea20d27e30fbdf]):::data classDef model fill:lightblue classDef meta fill:sandybrown Example: dynamic IRI prefix \u00b6 It is possible to specify a dynamic IRI prefix to addHashedIri() . The following code snippet uses a dynamic IRI prefix from the data source record: fromJson([ { prefix: Iri('https://triplydb.com/my-account/my-dataset/id/person/'), name: 'John Doe', }, { prefix: Iri('https://triplydb.com/my-account/my-dataset/id/product/'), name: '123', }, ]), addHashedIri({ prefix: 'prefix', content: ['name'], key: '_subject', }), triple('_subject', a, owl.NamedIndividual), This results in the following linked data: prefix person: prefix product: person:76f294ac31199b65ec25048439b66f78 a owl:NamedIndividual. product:9154deaa364b289c6b012e99f947f30e a owl:NamedIndividual. Example: statement reification \u00b6 The RDF standard allows individual statements to be identified by a node. This approach is called statement reification and can be used to assert metadata about statements or can represent modalities such as probability and belief. The following snippet uses addHashedIri() to create a unique identifier for each reified statement: fromJson([{ id: '1', name: 'John Doe' }]), // Step 1. Create the subject, predicate, and object terms. addIri({ prefix: prefix.person, content: 'id', key: 'subject' }), addIri({ prefix: prefix.def, content: str('name'), key: 'predicate' }), addLiteral({ content: 'name', key: 'object' }), // Step 2. Create the triple statement. triple('subject', 'predicate', 'object'), // Step 3. Create the reified statement. addHashedIri({ prefix: prefix.statement, content: ['subject', 'predicate', 'object'], key: 'statement', }), pairs('statement', [a, rdf.Statement], [rdf.subject, 'subject'], [rdf.predicate, 'predicate'], [rdf.object, 'object'], ), This results in the following linked data: person:1 def:name 'John Doe'. statement:549decc4c44204a907aa32b4cc9bfaba a rdf:Statement; rdf:subject person:1; rdf:predicate def:name; rdf:object 'John Doe'. 
Or diagrammatically: graph TB person --- name name --> johndoe statement -- a --> Statement statement -- rdf:subject --> person statement -- rdf:predicate --> name statement -- rdf:object --> johndoe Statement[rdf:Statement]:::model person([person:1]):::data name[def:name]:::model johndoe([John Doe]):::data statement([statement:549decc4c44204a907aa32b4cc9bfaba]):::meta classDef model fill:lightblue classDef meta fill:sandybrown addIri() \u00b6 Creates an IRI based on a specified local name. Signature \u00b6 addIri({ prefix, content, key }) Parameters \u00b6 prefix Optionally, a static or dynamic IRI. This IRI will appear before the local name specified for the content parameter. If the prefix parameter is absent, parameter content must contain an absolute IRI. content A string, or a key that contains a string. If the prefix parameter is specified, content specifies the IRI local name that appears after the IRI prefix. If the prefix argument is not specified, content is assumed to encode a full absolute IRI. key A new key where the created IRI is stored. When to use? \u00b6 This transformation can be used in the following two ways: 1. By using an IRI prefix and a local name. 2. By using a full absolute IRI. Note that the composed IRI is not allowed to be a skolemised IRI, i.e. the prefix cannot be an IRI whose pathname starts with /.well-known/genid/ . For that purpose, use transformation addSkolemIri() instead. See also \u00b6 If the created IRI is used exactly once, it is often better to use inline function iri() instead. Example: Prefix declaration and local name \u00b6 The following snippet creates an IRI based on the specified IRI prefix and local name: addIri({ prefix: prefix.person, content: 'username', key: '_person', }), triple('_person', a, sdo.Person), This results in the following linked data assertions: person:johndoe a sdo:Person. Or diagrammatically: graph LR johndoe -- a --> Person Person[sdo:Person]:::model johndoe(person:johndoe):::data The following snippet makes the same assertion, but uses assertion iri() instead of transformation addIri() : triple(iri(prefix.person, 'username'), a, sdo.Person), Example: Absolute IRI \u00b6 The following snippet creates the same IRI, but does not use a predefined prefix IRI: addIri({ content: 'https://example.com/id/person/johndoe', key: '_person', }), triple('_person', a, sdo.Person), Or diagrammatically: graph LR johndoe -- a --> Person Person[sdo:Person]:::model johndoe(https://example.com/id/person/johndoe):::data The following snippet uses assertion iri() instead of transformation addIri() : triple(iri('https://example.com/id/person/johndoe'), a, sdo.Person), addLiteral() \u00b6 Creates a new literal and adds it to the Record under the specified key. This transformation can be used in the following 3 ways: If a datatype (key: datatype ) is given, a typed literal is created. If a language tag (key: language ) is given, a language-tagged string (datatype rdf:langString ) is created. If neither a datatype nor a language tag is given, a literal with datatype xsd:string is created. When to use \u00b6 This transformation is typically used when: The same literal occurs in two or more statement assertions (function triple() or quad() ). This avoids having to specify the same literal multiple times using function literal() . The datatype or language tag is derived from the source data record. Parameters \u00b6 content A key that contains a string value, or a string specified with function str() .
datatype Optionally, a key that stores an IRI or a static IRI. language Optionally, a language tag from the lang object, or a key that stores such a language tag. validate Optionally, a single validator condition or an array of validator conditions that the literal content must satisfy. Each validator returns a boolean, and an error is thrown when a validator condition does not hold. key A new key where the created literal is stored. See also \u00b6 If the created literal is used exactly once, it is often better to use the inline function literal() instead. Example: Typed literal \u00b6 The following snippet asserts a triple with a typed literal with datatype IRI xsd:date : fromJson([{ id: '123', date: '2022-01-30' }]), addLiteral({ content: 'date', datatype: xsd.date, key: '_dateCreated', }), triple(iri(prefix.book, 'id'), sdo.dateCreated, '_dateCreated'), This makes the following linked data assertion: book:123 sdo:dateCreated '2022-01-30'^^xsd:date. Notice that the same linked data could have been asserted with the following use of the literal() assertion middleware: fromJson([{ id: '123', date: '2022-01-30' }]), triple(iri(prefix.book, 'id'), sdo.dateCreated, literal('date', xsd.date)), Example: String literal \u00b6 The following snippet asserts a triple with a string literal in the object position: fromJson([{name: 'London'}]), addLiteral({ content: 'name', key: '_name', }), triple(iri(prefix.city, 'name'), skos.prefLabel, '_name'), This makes the following assertion: city:London skos:prefLabel 'London'. The literal 'London' has type xsd:string . This is the standard datatype IRI for typed literals in the linked data languages (i.e. Turtle, TriG, and SPARQL). Notice that the same linked data could have been asserted with the following snippet, where the string value 'London' is automatically cast into a string literal: fromJson([{ name: 'London' }]), triple(iri(prefix.city, 'name'), skos.prefLabel, 'name'), Example: Language-tagged string \u00b6 The following snippet asserts a triple with a language-tagged string in the object position: fromJson([{ name: 'London' }]), addLiteral({ content: 'name', language: language.en, key: '_name', }), triple(iri(prefix.city, 'name'), skos.prefLabel, '_name'), This results in the following linked data assertion: city:London skos:prefLabel 'London'@en. Notice that the same linked data could have been asserted with the following use of the literal() assertion middleware: fromJson([{ name: 'London' }]), triple(iri(prefix.city, 'name'), skos.prefLabel, literal('name', lang['en'])), Example: Validate usage \u00b6 The following snippet asserts a triple with a person's email address in the object position. The email address is validated, and an error is thrown when a record contains an invalid email address: fromJson([{ name: \"John\", email: 'john.appleseed@example.com' }, {name: 'NA', email: 'notAnEmail' } ]), addLiteral({ content: 'email', validate: isEmail(), key: '_email', }), triple(iri(prefix.person, 'name'), foaf.mbox, '_email'), This results in the following error for the second record: ERROR (Record #2) String \"notAnEmail\" is not an email address.
When only correct email addresses are used: fromJson([{ name: \"John\", email: 'john.appleseed@example.com' }, { name: \"Lisa\", email: 'lisa.appleseed@example.com' } ]), addLiteral({ content: 'email', validate: isEmail(), key: '_email', }), triple(iri(prefix.person, 'name'), foaf.mbox, '_email'), it results in the following correct linked data assertions: person:John foaf:mbox \"john.appleseed@example.com\" person:Lisa foaf:mbox \"lisa.appleseed@example.com\" addSkolemIri() \u00b6 Creates a globally unique IRI that is intended to be used as a blank node identifier. Blank nodes are nodes without identification. It is relatively difficult to work with such nodes in graph data, since they cannot be identified or dereferenced online. For this reason TriplyETL uses Skolem IRIs to denote blank nodes. This allows blank nodes to be identified and dereferenced. This Skolemization approach is part of the RDF standard. Skolem IRIs are random IRIs whose root path starts with .well-known/genid/ . This makes it easy to distinguish them from other random IRIs that are not used to denote blank nodes. prefix An IRI or a key that contains an IRI whose path starts with .well-known/genid/ . key A new key where the created IRI is stored. See also \u00b6 The Skolemization section in the RDF standard explains what Skolem IRIs are and how they should be used. Example \u00b6 The following snippet uses a hashed IRI to create a predictable identifier for a geospatial feature, and a Skolem IRI to create an unpredictable identifier for the geometry. The snippet includes the prefix declarations to illustrate that the path of the Skolem IRI must start with .well-known/genid/ . const base = declarePrefix('https://example.com/') const prefix = { feature: declarePrefix(base('id/feature/')), skolem: declarePrefix(base('.well-known/genid/')), } // Etc fromJson([{ point: 'Point(1.1 2.2)' }]), addHashedIri({ prefix: prefix.feature, content: 'point', key: '_feature', }), addSkolemIri({ prefix: prefix.skolem, key: '_geometry', }), triple('_feature', geo.hasGeometry, '_geometry'), triple('_geometry', geo.asWKT, literal('point', geo.wktLiteral)), This results in the following linked data assertions: feature:22238008e490f725979118f8f2dd9b5a geo:hasGeometry skolem:0cf4b63252a0476a8afc20735aa03da6. skolem:0cf4b63252a0476a8afc20735aa03da6 geo:asWKT 'Point(1.1 2.2)'^^geo:wktLiteral. Notice that the feature IRI will be the same across ETL runs if the source data stays the same, but the Skolem IRI will always be different. Since the Skolem IRIs can be identified by the start of their path ( .well-known/genid/ ), the same linked data assertions can be displayed as follows: feature:22238008e490f725979118f8f2dd9b5a geo:hasGeometry [ geo:asWKT 'Point(1.1 2.2)'^^geo:wktLiteral ]. addTag() \u00b6 This middleware creates a language tag based on a given string value. Parameters \u00b6 content A string value that encodes a language tag according to the IANA language subtag registry. key A new key where the language tag will be stored. Throws \u00b6 An error is emitted if the given string value does not follow the language tag format, or denotes a language tag that is not currently registered. See also \u00b6 The language tag format is defined in the IETF BCP 47 standard (RFC 5646) . Language tags are registered in the IANA language subtag registry .
Example \u00b6 The following snippet created a language tag for the Dutch language as spoken in The Netherlands, and uses it to assert a language-tagged string: fromJson([{ label: 'Amsterdam' }]), addTag({ content: 'nl-nl', key: 'lang', }), triple(iri(prefix.city, 'label'), rdfs.label, literal('label', 'lang')), addValue() \u00b6 This middleware allows any value to be added to the Record. Description \u00b6 This middleware is useful for data that is not present in the source data record, but must be used in one or more assertions. Parameters \u00b6 content Any value that can be represented in TypeScript. key A new key where the value is stored. Example \u00b6 The following snippet starts out with an empty source record ( {} ), and adds a new data key to it. The added value is an array that contains a string and a number (in that order). This new value is used in the triple assertion, where 'data[0]' extracts the string element and 'data[1]' extracts the number elements. fromJson([{}]), addValue({ content: ['johndoe', 22], key: 'data', }), triple(iri(prefix.person, 'data[0]'), foaf.age, 'data[1]'), This results in the following linked data assertion: person:johndoe foaf:age 22. Example \u00b6 The following snippet adds a key called _startDate that either contains the start date as specified in the data source record, or the value 'unknown' : fromJson([ { id: '123', start: '2022-02-12' }, { id: '456' }, ]), ifElse({ if: 'start', then: addLiteral({ content: 'start', datatype: xsd.date, key: '_start', }), }, { else: addValue({ content: 'unknown', key: '_start', }), }), triple(iri(prefix.event, 'id'), sdo.startDate, '_start'), This results in the following linked data assertions: event:123 sdo:startDate '2022-02-12'^^xsd:date. event:456 sdo:startDate 'unknown'. capitalize() \u00b6 Transforms a string value to its capitalized variant. If the first character of a string has an uppercase variant, then that variant is used. If the first character does not have an uppercase variant -- because the character is already uppercase or is a punctuation character -- then the string remains unchanged. This transformation can uppercase the first character in any language; the Unicode Default Case Conversion algorithm is used. Parameters \u00b6 content A key that contains a string value. key A new key where the capitalized result is stored. Example: Class IRIs \u00b6 According to convention, classes in linked data are denoted by IRIs whose local name starts with a capital letter. The following source data contains nice values for the type key, but they do not start with a capital letter yet. The following snippet capitalizes the values of the type keys, and uses them to create class IRIs. fromJson([ { id: '1', type: 'location' }, { id: '2', type: 'person' }, ]), capitalize({ content: 'type', key: '_type', }), triple(iri(prefix.id, 'id'), a, iri(prefix.def, '_type')), This results in the following linked data assertions: id:1 a def:Location. id:2 a def:Person. concat() \u00b6 Description \u00b6 Concatenates an array of strings into one new string. An optionally specified separator is placed in between every two consecutive string values. Parameters \u00b6 content An array of key that contain a string and/or strings specified with assertion str() . separator Optionally, the string that is places between every two consecutive string values. key A new key where the concatenated string is stored. 
Example \u00b6 The following snippet concatenates the first and last name of a person (in that order), using a space separator. fromJson([{ id: '1', first: 'John', last: 'Doe' }]), concat({ content: ['first', 'last'], separator: ' ', key: '_name', }), triple(iri(prefix.person, 'id'), foaf.name, '_name'), This results in the following linked data assertion: person:1 foaf:name 'John Doe'. copy() \u00b6 Makes a plain copy from the value stored in the given key to a new key. Parameters \u00b6 content A value of any type, or a key that contains a value of any type. type Optionally, the name of the TypeScript type of the value. The default value is 'string' . key A new key where the plain copy is stored. Example \u00b6 Plain copies can be used to abbreviate long keys, especially in tree-shaped data like JSON or XML. In the following example, values stored in a long nested key are copied into a short and descriptive key. This is even more useful if the key is used many times in the script. copy({ content: 'record[0].family[0].children.child[0].id.$text', key: 'childId', }), Example \u00b6 Since plain copies introduce a new name for an existing value, the new name can be used to store extra information about the value. The following example stores an English name, if available; or a Dutch name, if available; or no name at all. This is a relatively complex example that can only be accomplished by copying the names for the encountered languages under descriptive key names. fromJson([ { id: '1', names: [ { language: 'en', value: 'London' }, { language: 'fr', value: 'Londres' }, { language: 'nl', value: 'Londen' }, ], }, { id: '2', names: [ { language: 'fr', value: 'Paris' }, { language: 'nl', value: 'Parijs' }, ], }, ]), forEach('names', [ _switch('language', // Plain copy of the English label, if available. ['en', copy({ content: 'value', key: '$parent.en' })], // Plain copy of the Dutch label, if available. ['nl', copy({ content: 'value', key: '$parent.nl' })], ), ]), ifElse({ // Prefer an English label over a Dutch label. if: 'en', then: copy({ content: 'en', key: '_preferredName' }), }, { // If there is no English label, a Dutch label is a second-best option. if: 'nl', then: copy({ content: 'nl', key: '_preferredName' }), }), // If there is either an English or a Dutch label, assert it. when('_preferredName', [ triple(iri(prefix.city, 'id'), rdfs.label, '_preferredName'), ]), This results in the following linked data assertions: city:1 rdfs:label 'London'. city:2 rdfs:label 'Parijs'. decodeHtml() \u00b6 Description \u00b6 This transformation decodes any HTML entities that appear in a given key. The following HTML entities are common in source data: HTML entity Decoded &amp; & &gt; > &lt; < You do not need to use this transformation if you want to assert literals with datatype IRI rdf:HTML . HTML entities are meaningful in HTML, so there they should be preserved. Parameters \u00b6 content A key in the Record that contains string values with HTML entities. key A new key where the decoded content is stored. Example \u00b6 The following snippet takes HTML texts from the source data and asserts them as regular text literals. Since HTML entities are meaningless in regular text, decodeHtml is used to decode these entities. fromJson([ { id: '1', label: 'A&amp;B' }, { id: '2', label: '1 &lt; 2' }, ]), decodeHtml({ content: 'label', key: '_label', }), triple(iri(prefix.id, 'id'), rdfs.label, '_label'), This results in the following linked data assertions: id:1 rdfs:label 'A&B'. id:2 rdfs:label '1 < 2'.
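Conversely, when the source strings are meant to be published as HTML, the entities should be kept. A minimal, hedged sketch (assuming the same prefix declarations as above, and an rdf vocabulary object that provides the rdf:HTML datatype IRI):

fromJson([
  { id: '3', label: '<b>A&amp;B</b>' },
]),
// No decodeHtml() step: the entities are meaningful inside HTML content.
triple(iri(prefix.id, 'id'), rdfs.label, literal('label', rdf.HTML)),

This keeps the &amp; entity intact in the asserted HTML literal.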
geojsonToWkt() \u00b6 Transforms GeoJSON objects to their corresponding Well-Known Text (WKT) serialization strings. Parameters \u00b6 content A key that stores a GeoJSON object. crs Optionally, an IRI that denotes a Coordinate Reference System (CRS). You can use IRIs from the epsg object. If absent, uses https://epsg.io/4326 as the CRS. key A new key where the WKT serialization string is stored GeoJSON and Well-Known Text (WKT) \u00b6 According to the GeoJSON standard , the only Coordinate Reference System (CRS) that is allowed to be used is EPSG:4326/WGS84. In practice, source data sometimes (incorrectly) stores GeoJSON formatted data in other CRSes. An example of this is the GISCO dataset of the European Union, which uses the EPSG:3857 CRS. For cases like these, the optional crs parameter comes in handy. See also \u00b6 The GeoJSON format is standardized in RFC 7946 . The Well-Known Text (WKT) serialization format is standardized as part of ISO/IEC 13249-3:2016 standard . Example \u00b6 The following snippet converts GeoJSON objects that denote traffic light locations to their GeoSPARQL representation. fromJson([ { id: '123', geometry: { type: 'Point', coordinates: [6.256, 48.480], }, }, ]), addIri({ prefix: prefix.feature, content: 'id', key: '_feature', }), geojsonToWkt({ content: 'geometry', crs: epsg[3857], key: '_wkt', }), addHashedIri({ prefix: prefix.geometry, content: '_wkt', key: '_geometry' }), pairs('_feature', [a, def.TrafficLight], [geo.hasGeometry, '_geometry'], ), pairs('_geometry', [a, geo.Geometry], [geo.asWKT, literal('_wkt', geo.wktLiteral)], ), This results in the following linked data assertions: feature:123 a def:TrafficLight; geo:hasGeometry geometry:197e6376c2bd8192c24911f88c330606. geometry:197e6376c2bd8192c24911f88c330606 a geo:Geometry; geo:asWKT 'Point(6.256 48.480)'^^geo:wktLiteral. Or diagrammatically: graph LR feature -- a --> TrafficLight feature -- geo:hasGeometry --> geometry geometry -- a --> Geometry geometry -- geo:asWKT --> wkt Geometry[geo:Geometry]:::model TrafficLight[def:TrafficLight]:::model feature(feature:123):::data geometry(geometry:197e6376c2bd8192c24911f88c330606):::data wkt(\"'Point(6.256 48.480)'^^geo:wktLiteral\"):::data jpath() \u00b6 Description \u00b6 Filters a value based on a JSON Path expression. JSON Path is a query language for JSON. For the syntax of JSON Path expressions, please visit the JSON Path documentation page . Use cases \u00b6 This function simplifies the complex key specification to filter specific values. It can only be used for an object of a triple to create a literal. The result of a function must have a fundamental type. Parameters \u00b6 value A JSON Path expression. 
Example \u00b6 The following examples will create a literal based on key value : If key 'ISO_639-2' exists: fromJson({ language: [ { 'ISO_639-1': 'en', lcid: 2057, value: 'Paris' }, { 'ISO_639-1': 'nl', 'ISO_639-2': 'nld', lcid: 1043, value: 'Parijs' }, ], }), triple( iri(prefix.city, '$recordId'), rdfs.label, literal(jpath(\"$.language[?(@['ISO_639-2'])].value\"), language.nl) ), If key 'ISO_639-1' is equal to nl : fromJson({ language: [ { 'ISO_639-1': 'en', lcid: 2057, value: 'Paris' }, { 'ISO_639-1': 'nl', 'ISO_639-2': 'nld', lcid: 1043, value: 'Parijs' }, ], }), triple( iri(prefix.city, '$recordId'), rdfs.label, literal(jpath(\"$.language[?(@['ISO_639-1'] =='nl')].value\"), language.nl) ), If key 'lcid' is lower than 1,100: fromJson({ language: [ { 'ISO_639-1': 'en', lcid: 2057, value: 'Paris' }, { 'ISO_639-1': 'nl', 'ISO_639-2': 'nld', lcid: 1043, value: 'Parijs' }, ], }), triple( iri(prefix.city, '$recordId'), rdfs.label, literal(jpath('$.language[?(@.lcid < 1100)].value'), language.nl) ), All three examples generate the following linked data: record:1 rdfs:label 'Parijs'@nl. lowercase() \u00b6 Description \u00b6 Translates a string value to its lowercase variant. This middleware can lowercase strings in any language; the Unicode Default Case Conversion algorithm is used. Use cases \u00b6 Older data formats sometimes use uppercase letters for header names or codes. The lowercase transformation middleware may be used to change such string values to lowercase. Parameters \u00b6 content A key that contains a string value. key A new key where the lowercase variant is stored. Example \u00b6 The following snippet starts out with header values that use uppercase characters exclusively. The lowerCase transformation is used to create lowercase names that can be used to create property IRIs. fromJson([ { from: '1', rel: 'PARENT', to: '2' }, { from: '2', rel: 'CHILD', to: '1' }, ]), lowercase({ content: 'rel', key: '_relationship', }), triple( iri(prefix.id, 'from'), iri(prefix.def, '_relationship'), iri(prefix.id, 'to'), ), This results in the following linked data assertions: id:1 def:parent id:2. id:2 def:child id:1. padEnd() \u00b6 Description \u00b6 Adds a given padding string zero or more times to the end of a string value, until the resulting string value is exactly a given number of characters long. Use cases \u00b6 This transformation is useful for identifiers that must have fixed length and that may be suffixed by zero's. Parameters \u00b6 content A key that contains a string value. If the key contains a numeric value, that value is first cast to string. padString The string that is added to the end of the string value in key content , until the result string has exactly targetLength characters. Can be a static string or a key. targetLength The exact number of characters that the resulting string should have. The string value is copied over as-is when targetLength is smaller than or equal to the length of the string value in key content . This includes cases where targetLength is negative or zero. key A new key where the padded string is stored. Example \u00b6 The following snippet processes identifiers of varying length, and ensures that they have the same length after suffixing '0' characters. 
fromJson([ { id: '16784' }, { id: '129' }, ]), padEnd({ content: 'id', padString: '0', targetLength: 6, key: '_id', }), This results in the following two Records: [ { \"id\": \"16784\", \"_id\": \"167840\" }, { \"id\": \"129\", \"_id\": \"129000\" } ] padStart() \u00b6 Description \u00b6 Adds a given padding string zero or more times in front of a string value, until the resulting string value is exactly a given number of characters long. Use cases \u00b6 This transformation is useful for identifiers that must have fixed length and that may be prepended by zero's. If key content contains a numeric value, then that value is first cast to string. content A key that contains a string value. padString The string that is added in front of the string value in key content , until the result string has exactly targetLength characters. targetLength The exact number of characters that the resulting string should have. The string value is copied over as-is when targetLength is smaller than or equal to the length of the string value in key content . This includes cases where targetLength is negative or zero. key A new key where the lowercased string is stored. Example: Fixed-length identifiers \u00b6 The following snippet processes identifiers of varying length, and ensures that they have the same length after prefixing '0' characters. fromJson([ { id: '16784' }, { id: '129' }, ]), padStart({ content: 'id', padString: '0', targetLength: 6, key: '_id', }), This results in the following two records: [ { \"id\": \"16784\", \"_id\": \"016784\" }, { \"id\": \"129\", \"_id\": \"000129\" } ] Example: Create year literals \u00b6 In order to create standards-conforming temporal literal, we need to pad the year component to be at least 4 decimal digits long. (This requirement is defined in the XML Schema Datatypes 1.1: Part 2 Datatypes standard.) Suppose that the source data looks as follows: Artifact Year 0001 612 0002 1702 We can ensure that all years have at least 4 decimal digits by calling the following function: padStart({ content: 'Year', padString: '0', targetLength: 4, key: '_lexicalForm', }), triple( iri(prefix.id, 'Artifact'), dct.created, literal('_lexicalForm', xsd.gYear), ), This makes the following linked data assertions: id:0001 dct:created '0612'^^xsd:gYear. id:0002 dct:created '1702'^^xsd:gYear. replace() \u00b6 Description \u00b6 Performs a regular expression replacement to the given input string, and stores the result in a new key. Parameters \u00b6 content A key that contains a string value, or a static string specified with assertion str() . from A JavaScript Regular Expression . to Optionally, a string that replaces potential matches of the Regular Expression ( from ). Use $1 , $2 , etc. to insert matches. If absent, the empty string is used. key A new key where the result of the replacement is stored. Example \u00b6 Suppose the source data contains date/time strings, but only the date component is needed: { \"created\": \"2020-01-02T00:00:00.0Z\" } It is possible to extract only the date part (everything up to the T ) in the following way: replace({ content: 'created', from: /^([^T]*).*$/, to: '$1', key: '_created', }), triple('_creativeWork', dct.created, literal('_created', xsd.date)), This results in the following Record: { \"created\": \"2020-01-02T00:00:00.0Z\", \"_created\": \"2020-01-02\" } split() \u00b6 Description \u00b6 Splits a string into an array of strings, and stores that array in a new key. 
Whitespace handling \u00b6 This transformation removes any trailing whitespace that remains after the strings are split. This ensures that irregular use of whitespace in the source data is taken care of automatically. Empty values \u00b6 This transformation removes any elements of the splitted string that are empty (after trimmimng). To keep empty entries, use the `` flag. Use cases \u00b6 The transformation is used when: - Tabular source data encodes multiple values inside singular cells. (Such concatenated storage inside cells is a data quality issue, because the table format cannot guarantee that the separator character does not (accidentally) occur inside individual values inside a cell. Tree-shaped source formats are able to store multiple values for the same key reliably, e.g. JSON and XML.) - Source data contains complex string values that can be decomposed into stand-alone components with distinct meaning. Parameters \u00b6 content A key that stores a string, or a string specified with assertion str() . separator A string or a regular expression that is used to separate the content. key A new key where the array of split strings is stored. keepEmptyEntities A boolean flag indicating if the empty values of a splitted string should be kept or not. By default empty values are removed. Example: Multiple values in singular table cells \u00b6 Tabular formats are unable to store more than one value in a cell. Because of this limitation, tabular data sources sometimes encode multiple values in cells by encoding them into one string. A separator character is typically used to distinguish between these multiple values. Suppose that the source data looks as follows: Parent Child John Jane, Jake , Kate ,, The following transformation splits the cells that encode zero or more children for each parent: split({ content: 'Child', separator: ',', key: 'Children', }), This results in the following transformed records: { \"Parent\": \"John\", \"Child\": \"Jane, Jake , \", \"Children\": [ \"Jane\", \"Jake\" ] } { \"Parent\": \"Kate\", \"Child\": \",, \", \"Children\": [] } Notice that trailing whitespace and empty values are dealt with automatically. Since the split() transformation always results in an array of strings, we can use the term assertion iris() afterwards: split({ content: 'children', separator: ',', key: '_children', }), triple( iri(prefix.person, 'parent'), sdo.children, iris(prefix.person, '_children') ), This results in the following linked data assertions: person:johndoe sdo:children person:janedoe, person:jakedoe. Example: Split a complex string into components \u00b6 The following snippet uses a regular expression to split a KIX code. (A KIX code is a standardized format for representing postal addresses in The Netherlands.) fromJson([{ id: '1', KIX: '1231FZ13Xhs' }]), split({ content: 'KIX', separator: /^(\\d{4}[A-Z]{2})(\\d{1,5})(?:X(.{1,6}))/, key: 'KIX_components', }), triple(iri(prefix.id, 'id'), sdo.postalCode, 'KIX_components[1]'), This results in the following record: { \"id\": \"1\", \"KIX\": \"1231FZ13Xhs\", \"KIX_components\": [\"\", \"1231FZ\", \"13\", \"hs\", \"\"] } And in the following linked data assertion: id:1 sdo:postalCode '1231FZ'. substring() \u00b6 Description \u00b6 This middleware takes a substring from the input string and stores the result in a new key. Parameters \u00b6 content A key that stores a string value, or a string specified with assertion str() . start The index of the first character that is included in the substring. 
The first character has index 0. end Optionally, the index of the first character that is excluded from the substring. If absent, the substring ends at the end of the source string. key The new key in which the substring is stored. Example \u00b6 The Library of Congress MARC format stores the type of record in the sixth character that appears in the leader key. We use substring() to extract this character, and then use transformation translateAll() to map them to a corresponding class IRI: substring({ content: 'metadata.marc:record.marc:leader.$text', start: 6, end: 7, key: '_typeOfRecord', }), translateAll({ content: '_typeOfRecord', table: { a: dcm.Text, k: dcm.StillImage, }, key: '_class', }), triple('_iri', a, '_class'), translateAll() \u00b6 Description \u00b6 Translates all dynamic strings from a specific key to new values of an arbitrary type To , according to a specified translation table. Since this function translates all values, the mapped values can have any type T ; they do not need to be strings. For example, this allows strings to be translated to IRIs or to literals. When to use? \u00b6 This approach is used when: The set of source data values is small. The set of source data values is known ahead of time. The corresponding linked data terms are known ahead of time. The appearance of a new value is considered to be an error in the source data. Parameters \u00b6 content A key that contains a string value. table A translation table from strings to values of some arbitrary type T . nulls Optionally, a list of string values that are considered denote NULL values in the source data. When a NULL value is encountered, the special value undefined is added for the target key . default Optionally, a default value or a default value-determining function that is used for string values that are neither in the translation table ( table ) nor in the NULL values list ( nulls ). The function must return a value of type T . Use of a default value value is equivalent to using the following value-determining function: _ => value . key A new key where the results of the translation are stored. Example: Map source data to IRI values \u00b6 Suppose that source data contains country names. In linked data we want to use IRIs to denote countries, so that we can link additional information. Since the list of countries that appears in the source data is not that long, we can specify a translation table from names to IRIs by hand: change.translateAll({ content: 'country', table: { 'Belgium': country.be, 'Germany': country.de, 'England': country.gb, ..., }, nulls: ['Unknown'], key: '_country', }), when('country', [ triple('_country', a, sdo.Country), ]), Example: Map source data to IRI properties \u00b6 When we relate a creative work to its creator, we sometimes know whether the creator was the actor, architect, author, etc. of the creative work. But in other cases we only know that there is a generic creator relationship. The Library of Congress Relators vocabulary allows us to express specific and generic predicates of this kind. transform.translateAll({ table: { 'actor': rel.act, 'architect': rel.arc, 'author': rel.aut, ..., }, default: rel.oth, // generic relator key: '_relator', }), triple('_creativeWork', '_relator', '_creator'), translateSome() \u00b6 Description \u00b6 Translates some strings, according to the specified translation table, to other strings. Strings that are not translated according to the translation table are copied over as-is. 
Parameters \u00b6 content A key that contains a string value. table A translation table that specifies translations from strings to strings. key A new key where the translated strings are stored. Use cases \u00b6 Source data often contains some strings that are correct and some that are incorrect. For example, if source data contains a key with city names, some of the names may be misspelled. In such cases, translateSome() can be used to translate the incorrect strings into correct ones. A translateSome() transformation is often performed directly before a translateAll() transformation. The former ensures that all string values are correct (e.g. fixing typos in city names); the latter ensures that all strings are mapped onto IRIs (e.g. city names mapped onto city-denoting IRIs). Example \u00b6 The following example fixes an encoding issue that occurs in the source data: transform.translateSome({ content: 'name', table: { 'Frysl\ufffd\ufffdn': 'Frysl\u00e2n', // Other entries for typographic fixes go here. ..., }, key: '_name', }), tryLiteral() \u00b6 Description \u00b6 This transformation is used when string values must be mapped onto literals with varying datatype IRIs. The datatype IRIs that could apply are specified in a list. The specified datatype IRIs are tried out from left to right. The first datatype IRI that results in a valid literal is chosen. content A key that contains a string value, or a string value specified with assertion str() . datatypes An array of two or more datatype IRIs. key A new key where the created literal is stored. Throws \u00b6 An exception is emitted if a string value does not belong to the lexical space of any of the specified datatype IRIs. Example \u00b6 A literal is valid if the given string value appears in the lexical space of a specific datatype IRI. This is best explained with an example: tryLiteral({ content: 'date', datatypes: [xsd.date, xsd.gYearMonth, xsd.gYear], key: '_publicationDate', }), Source data in key 'date' Result in key '_publicationDate' '1900-01-02' '1900-01-02'^^xsd:date '1900' '1900'^^xsd:gYear '02-01-1900' An error is emitted. If we do not want to emit errors for string values that do not satisfy any of the specified datatype IRIs, we may choose to include xsd.string as the last datatype IRI in the list. Notice, however, that this will result in dates that cannot be compared on a timeline, since they were not transformed to an XSD date/time datatype. See also \u00b6 You only need to use tryLiteral() if the datatype IRI varies from record to record. If the datatype IRI is the same for every record, then the regular assertion function literal() should be used instead. uppercase() \u00b6 Description \u00b6 Translates a string value to its uppercase variant. This middleware can uppercase strings in any language; the Unicode Default Case Conversion algorithm is used for this. Parameters \u00b6 content A key that contains a string value. key A new key where the uppercase variant is stored. Example \u00b6 In the following example, the string in the key 'countryCode' is changed to its uppercase variant: fromJson({ place: 'Amsterdam', countryCode: 'nl' }), uppercase({ content: 'countryCode', key: '_countryCode' }), triple(iri(prefix.id, 'place'), iri(prefix.geonames, str('countryCode')), '_countryCode') This results in the following linked data assertion: city:Amsterdam geonames:countryCode \"NL\" wkt.addPoint() \u00b6 Description \u00b6 Creates a Well-Known Text (WKT) serialization string from the corresponding geospatial point.
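The CRS defaults to EPSG:4326/WGS84, but it can also be stated explicitly with an IRI from the epsg object. A minimal sketch, reusing the 'lat' and 'long' keys from the example below and simply making the default explicit: wkt.addPoint({ latitude: 'lat', longitude: 'long', crs: epsg[4326] /* explicit, identical to the default */, key: '_point', }),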
Parameters \u00b6 latitude A key or a string assertion ( str() ) with latitude. longitude A key or a string assertion ( str() ) with longitude. crs Optionally, an IRI that denotes a Coordinate Reference System (CRS). You can use IRIs from the epsg object. If absent, uses EPSG:4326/WGS84 as the CRS. key A new key where the WKT string is stored. Example \u00b6 The following example creates a WKT literal from the geo coordinates of Amsterdam: fromJson({ place: 'Amsterdam', lat: 52.37308, long: 4.89245 }), wkt.addPoint({ latitude: 'lat', longitude: 'long', key: '_point' }), triple(iri(prefix.city, 'place'), geo.asWKT, '_point'), This results in the following record of the key '_point' : { \"_point\": { \"termType\": \"Literal\", \"value\": \"Point (52.37308 4.89245)\", \"language\": \"\", \"datatype\": { \"termType\": \"NamedNode\", \"value\": \"http://www.opengis.net/ont/geosparql#wktLiteral\", \"validationStatus\": \"canonical\" }, \"validationStatus\": \"canonical\" } } And in the following linked data assertion: city:Amsterdam geo:asWKT \"Point (52.37308 4.89245)\"^^geo:wktLiteral wkt.project() \u00b6 Description \u00b6 Converts the projection of a Well-Known Text (WKT) literal from one Coordinate Reference System to another one. Parameters \u00b6 content An array of keys or numbers. key A new key where the new projection is stored. fromCrs : an IRI that denotes a Coordinate Reference System (CRS) of the content . toCrs : Optionally, an IRI that denotes a Coordinate Reference System (CRS) we want to convert to. If absent, uses EPSG:4326/WGS84 as the CRS. Example \u00b6 The following example converts an array with latitude and longitude in the content key from Dutch grid coordinates (Rijksdriehoeks-coordinates) to WGS84 coordinates. fromJson({ place: 'Amsterdam', lat: 121307, long: 487360 }), wkt.project({ content: ['lat', 'long'], key: '_coordinates', fromCrs: epsg[666], toCrs: epsg[4326] }), This results in the following record of the key '_coordinates' : { \"_coordinates\": [ 52.374671935135474, 4.892803721020475 ] } We can now use the converted result to create a WKT Point() using addPoint() : wkt.addPoint({ latitude: '_coordinates[0]', longitude: '_coordinates[1]', key: '_point' }), triple(iri(prefix.id, 'place'), geo.asWKT, '_point') This code snippet creates the following linked data assertion: city:Amsterdam geo:asWKT \"Point (52.374671935135474 4.892803721020475)\"^^geo:wktLiteral","title":"RATT"},{"location":"triply-etl/transform/ratt/#ratt-transformations","text":"RATT transformations are a core set of functions that are commonly used to change the content of TriplyETL Records. RATT transformations started out as TypeScript transformations that turned out to be useful in a wide variety of TriplyETL pipelines. Triply maintains this core set of transformation functions to allow new ETLs to make use of off-the-shelf functionality that has proven useful in the past.","title":"RATT transformations"},{"location":"triply-etl/transform/ratt/#overview","text":"The following transformation functions are currently available: Function Description addHashedIri() Creates an IRI with a content-based hash as the local name. addIri() Create a new IRI based on a prefix and a local name. addLiteral() Create a new literal based on a lexical form and a datatype IRI or language tag. addSkolemIri() Create a new IRI with a random local name, which advertises that it may be consistently replaced with blank nodes. addTag() Create a language tag. addValue() Create a TypeScript value.
capitalize() Transforms a string value to its capitalized variant. concat() Combine multiple strings into a new string. copy() Copy a value from an old into a new key. decodeHtml() Decode HTML entities that occur in strings. geojsonToWkt() Change GeoJSON strings to WKT strings. jpath() Uses the JSONPath query language to select a value from the record. lowercase() Change strings to their lowercase variants. padEnd() Pad the end of strings. padStart() Pad the start of strings. replace() Replace part of a string. split() Split a string into multiple substrings. substring() Extract a substring from a string. translateAll() Translate all string values to other values. translateSome() Translate some string values to other strings. tryLiteral() Create literals for which the datatype is not known beforehand. uppercase() Change a string to its uppercase variant. wkt.addPoint() Add a geospatial point using the Well-Known Text (WKT) format. wkt.project() Change the projection of a Well-Known Text (WKT) literal from one Coordinate Reference System into another.","title":"Overview"},{"location":"triply-etl/transform/ratt/#addhashediri","text":"Creates an IRI with a content-based hash as the local name.","title":"addHashedIri()"},{"location":"triply-etl/transform/ratt/#signature","text":"This function has the following signature: addHashedIri({ prefix, content, key })","title":"Signature"},{"location":"triply-etl/transform/ratt/#parameters","text":"prefix is a dynamic or static IRI. content is an array of static values, or a key that contains a dynamic array of values. key is a new key where the created IRI is stored.","title":"Parameters"},{"location":"triply-etl/transform/ratt/#when-to-use","text":"This function is used under the following circumstances: Something must be identified with an IRI. The thing that must be identified does not have a readily available identifier. The thing that must be identified does have properties whose combination of values is unique, and can therefore act as a composite identifier. This is called a composite key in database theory. The composed URL is not allowed to be skolemised anymore, e.g. the prefix cannot be an IRI where the pathname starts with /.well-known/genid/ . For this purpose, use transformation addSkolemIri() instead. Benefits: The created IRIs are the same across different ETL runs over the same source data. This supports important use cases like change management / versioning and graph signing. Downsides: It can take a lot of time to figure out which set of properties makes every IRI unique. In database theory this process is known as composite key detection . Furthermore, keeping the list of identifying properties up-to-date over time poses a maintenance burden.","title":"When to use?"},{"location":"triply-etl/transform/ratt/#example-lazy-identifiers","text":"Some source data does not include good identifiers for all data items. For example, the following source table contains first names and last names of persons, but neither property is by itself unique: First name Last name Jane Doe Jane Smith John Doe In such cases it may be an option to take a combination of columns, and use that combined sequence of columns for identification. The following snippet uses the combination of the first name and last name fields (in that order) to create a locally unique hash.
Together with an IRI prefix, this can be used to create globally unique IRIs: fromJson([{ 'First name': 'John', 'Last name': 'Doe' }]), addHashedIri({ prefix: prefix.person, content: ['First name', 'Last name'], key: '_person', }), pairs('_person', [a, sdo.Person], [sdo.givenName, 'First name'], [sdo.familyName, 'Last name'], ), This results in the following linked data: person:70020947bea6c39cccea20d27e30fbdf a sdo:Person; sdo:givenName 'John'; familyName 'Doe'. Or diagrammatically: graph LR person -- a --> Person person -- sdo:givenName --> john person -- sdo:familyName --> doe Person[sdo:Person]:::model doe['Doe']:::data john['John']:::data person([person:70020947bea6c39cccea20d27e30fbdf]):::data classDef model fill:lightblue classDef meta fill:sandybrown","title":"Example: lazy identifiers"},{"location":"triply-etl/transform/ratt/#example-dynamic-iri-prefix","text":"It is possible to specify a dynamic IRI prefix to addHashedIri() . The following code snippet uses a dynamic IRI prefix from the data source record: fromJson([ { prefix: Iri('https://triplydb.com/my-account/my-dataset/id/person/'), name: 'John Doe', }, { prefix: Iri('https://triplydb.com/my-account/my-dataset/id/product/'), name: '123', }, ]), addHashedIri({ prefix: 'prefix', content: ['name'], key: '_subject', }), triple('_subject', a, owl.NamedIndividual), This results in the following linked data: prefix person: prefix product: person:76f294ac31199b65ec25048439b66f78 a owl:NamedIndividual. product:9154deaa364b289c6b012e99f947f30e a owl:NamedIndividual.","title":"Example: dynamic IRI prefix"},{"location":"triply-etl/transform/ratt/#example-statement-reification","text":"The RDF standard allows individual statements to be identified by a node. This approach is called statement reification and can be used to assert metadata about statements or can represent modalities such as probability and belief. The following snippet uses addHashedIri() to create a unique identifier for each reified statement: fromJson([{ id: '1', name: 'John Doe' }]), // Step 1. Create the subject, predicate, and object terms. addIri({ prefix: prefix.person, content: 'id', key: 'subject' }), addIri({ prefix: prefix.def, content: str('name'), key: 'predicate' }), addLiteral({ content: 'name', key: 'object' }), // Step 2. Create the triple statement. triple('subject', 'predicate', 'object'), // Step 3. Create the reified statement. addHashedIri({ prefix: prefix.statement, content: ['subject', 'predicate', 'object'], key: 'statement', }), pairs('statement', [a, rdf.Statement], [rdf.subject, 'subject'], [rdf.predicate, 'predicate'], [rdf.object, 'object'], ), This results in the following linked data: person:1 def:name 'John Doe'. statement:549decc4c44204a907aa32b4cc9bfaba a rdf:Statement; rdf:subject person:1; rdf:predicate def:name; rdf:object 'John Doe'. 
Or diagrammatically: graph TB person --- name name --> johndoe statement -- a --> Statement statement -- rdf:subject --> person statement -- rdf:predicate --> name statement -- rdf:object --> johndoe Statement[rdf:Statement]:::model person([person:1]):::data name[def:name]:::model johndoe([John Doe]):::data statement([statement:549decc4c44204a907aa32b4cc9bfaba]):::meta classDef model fill:lightblue classDef meta fill:sandybrown","title":"Example: statement reification"},{"location":"triply-etl/transform/ratt/#addiri","text":"Creates an IRI based on a specified local name.","title":"addIri()"},{"location":"triply-etl/transform/ratt/#signature_1","text":"addIri({ prefix, content, key })","title":"Signature"},{"location":"triply-etl/transform/ratt/#parameters_1","text":"prefix Optionally, a static or dynamic IRI. This IRI will appear before the local name specified for the content parameter. If the prefix parameter is absent, parameter content must contain an absolute IRI. content A string, or a key that contains a string. If the prefix parameter is specified, content specifies the IRI local name that appears after the IRI prefix. If the prefix argument is not specified, content is assumed to encode a full absolute IRI. key A new key where the created IRI is stored.","title":"Parameters"},{"location":"triply-etl/transform/ratt/#when-to-use_1","text":"This transformation can be used in the following two ways: 1. By using an IRI prefix and a local name. 2. By using a full absolute IRI. In both cases, the composed URL is not allowed to be skolemised anymore, e.g. the prefix cannot be an IRI where the pathname starts with /.well-known/genid/ . For this purpose, use transformation addSkolemIri() instead.","title":"When to use?"},{"location":"triply-etl/transform/ratt/#see-also","text":"If the created IRI is used exactly once, it is often better to use inline function iri() instead.","title":"See also"},{"location":"triply-etl/transform/ratt/#example-prefix-declaration-and-local-name","text":"The following snippet creates an IRI based on the specified IRI prefix and local name: addIri({ prefix: prefix.person, content: 'username', key: '_person', }), triple('_person', a, sdo.Person), This results in the following linked data assertions: person:johndoe a sdo:Person. Or diagrammatically: graph LR johndoe -- a --> Person Person[sdo:Person]:::model johndoe(person:johndoe):::data The following snippet makes the same assertion, but uses assertion iri() instead of transformation addIri() : triple(iri(prefix.person, 'username'), a, sdo.Person),","title":"Example: Prefix declaration and local name"},{"location":"triply-etl/transform/ratt/#example-absolute-iri","text":"The following snippet creates the same IRI, but does not use a predefined prefix IRI: addIri({ content: 'https://example.com/id/person/johndoe', key: '_person', }), triple('_person', a, sdo.Person), Or diagrammatically: graph LR johndoe -- a --> Person Person[sdo:Person]:::model johndoe(https://example.com/id/person/johndoe):::data The following snippet uses assertion iri() instead of transformation addIri() : triple(iri('https://example.com/id/person/johndoe'), a, sdo.Person),","title":"Example: Absolute IRI"},{"location":"triply-etl/transform/ratt/#addliteral","text":"Creates a new literal and adds it to the Record under the specified key. This transformation can be used in the following 3 ways: If a datatype (key: datatype ) is given, a typed literal is created.
If a language tag (key: languageTag ) is given, a language-tagged string (datatype rdf:langString ) is created. If neither a datatype nor a language tag is given, a literal with datatype xsd:string is created.","title":"addLiteral()"},{"location":"triply-etl/transform/ratt/#when-to-use_2","text":"This transformation is typically used when: The same literal occurs in two or more statement assertions (function triple() or quad() ). This avoids having to specify the same literal multiple times using function literal() . The datatype or language tag is derived from the source data record.","title":"When to use"},{"location":"triply-etl/transform/ratt/#parameters_2","text":"content A key that contains a string value, or a string specified with function str() . datatype Optionally, a key that stores an IRI or a static IRI. language Optionally, a language tag from the lang object, or a key that stores such a language tag. validate Optionally, a single validator condition or an array of validator conditions that the literal content must satisfy. An error is thrown when a validator condition does not hold. key A new key where the created literal is stored.","title":"Parameters"},{"location":"triply-etl/transform/ratt/#see-also_1","text":"If the created literal is used exactly once, it is often better to use the inline function literal() instead.","title":"See also"},{"location":"triply-etl/transform/ratt/#example-typed-literal","text":"The following snippet asserts a triple with a typed literal with datatype IRI xsd:date : fromJson([{ id: '123', date: '2022-01-30' }]), addLiteral({ content: 'date', datatype: xsd.date, key: '_dateCreated', }), triple(iri(prefix.book, 'id'), sdo.dateCreated, '_dateCreated'), This makes the following linked data assertion: book:123 sdo:dateCreated '2022-01-30'^^xsd:date. Notice that the same linked data could have been asserted with the following use of the literal() assertion middleware: fromJson([{ id: '123', date: '2022-01-30' }]), triple(iri(prefix.book, 'id'), sdo.dateCreated, literal('date', xsd.date)),","title":"Example: Typed literal"},{"location":"triply-etl/transform/ratt/#example-string-literal","text":"The following snippet asserts a triple with a string literal in the object position: fromJson([{name: 'London'}]), addLiteral({ content: 'name', key: '_name', }), triple(iri(prefix.city, '_name'), skos.prefLabel, '_name') This makes the following assertion: city:London skos:prefLabel 'London'. The literal 'London' has type xsd:string . This is the standard datatype IRI for typed literals in the linked data languages (i.e. Turtle, TriG, and SPARQL). Notice that the same linked data could have been asserted with the following snippet, where the string value 'London' is automatically cast into a string literal: fromJson([{ name: 'London' }]), triple(iri(prefix.city, 'name'), skos.prefLabel, 'name'),","title":"Example: String literal"},{"location":"triply-etl/transform/ratt/#example-language-tagged-string","text":"The following snippet asserts a triple with a language-tagged string in the object position: fromJson([{ name: 'London' }]), addLiteral({ content: 'name', language: language.en, key: '_name', }), triple(iri(prefix.city, 'name'), skos.prefLabel, '_name'), This results in the following linked data assertion: city:London skos:prefLabel 'London'@en.
Notice that the same linked data could have been asserted with the following use of the literal() assertion middleware: fromJson([{ name: 'London' }]), triple(iri(prefix.city, 'name'), skos.prefLabel, literal('name', lang['en'])),","title":"Example: Language-tagged string"},{"location":"triply-etl/transform/ratt/#example-validate-usage","text":"The following snippet asserts a triple with a person's email address in the object position. The email address is validated, and an error is thrown when the record contains an invalid email address: fromJson([{ name: \"John\", email: 'john.appleseed@example.com' }, {name: 'NA', email: 'notAnEmail' } ]), addLiteral({ content: 'email', validate: isEmail(), key: '_email', }), triple(iri(prefix.person, 'name'), foaf.mbox, '_email'), This results in the following error for the second record: ERROR (Record #2) String \"notAnEmail\" is not an email address. Notice that when using only correct email addresses: fromJson([{ name: \"John\", email: 'john.appleseed@example.com' }, { name: \"Lisa\", email: 'lisa.appleseed@example.com' } ]), addLiteral({ content: 'email', validate: isEmail(), key: '_email', }), triple(iri(prefix.person, 'name'), foaf.mbox, '_email'), It results in the following correct linked data assertions: person:John foaf:mbox \"john.appleseed@example.com\" person:Lisa foaf:mbox \"lisa.appleseed@example.com\"","title":"Example: Validate usage"},{"location":"triply-etl/transform/ratt/#addskolemiri","text":"Creates a globally unique IRI that is intended to be used as a blank node identifier. Blank nodes are nodes without identification. It is relatively difficult to work with such nodes in graph data, since they cannot be identified or dereferenced online. For this reason TriplyETL uses Skolem IRIs to denote blank nodes. This allows blank nodes to be identified and dereferenced. This Skolemization approach is part of the RDF standard. Skolem IRIs are random IRIs whose root path starts with .well-known/genid/ . This makes it easy to distinguish them from other random IRIs that are not used to denote blank nodes. prefix An IRI or a key that contains an IRI whose path starts with .well-known/genid/ . key A new key where the created IRI is stored.","title":"addSkolemIri()"},{"location":"triply-etl/transform/ratt/#see-also_2","text":"The Skolemization section in the RDF standard explains what Skolem IRIs are and how they should be used.","title":"See also"},{"location":"triply-etl/transform/ratt/#example","text":"The following snippet uses a hashed IRI to create a predictable identifier for a geospatial feature, and a Skolem IRI to create an unpredictable identifier for the geometry. The snippet includes the prefix declarations to illustrate that the path of the Skolem IRI must start with .well-known/genid/ . const base = declarePrefix('https://example.com/') const prefix = { feature: declarePrefix(base('id/feature/')), skolem: declarePrefix(base('.well-known/genid/')), } // Etc fromJson([{ point: 'Point(1.1 2.2)' }]), addHashedIri({ prefix: prefix.feature, content: 'point', key: '_feature', }), addSkolemIri({ prefix: prefix.skolem, key: '_geometry', }), triple('_feature', geo.hasGeometry, '_geometry'), triple('_geometry', geo.asWKT, literal('point', geo.wktLiteral)), This results in the following linked data assertions: feature:22238008e490f725979118f8f2dd9b5a geo:hasGeometry skolem:0cf4b63252a0476a8afc20735aa03da6. skolem:0cf4b63252a0476a8afc20735aa03da6 geo:asWKT 'Point(1.1 2.2)'^^geo:wktLiteral.
Notice that the feature IRI will be the same across ETL runs if the source data stays the same, but the Skolem IRI will always be different. Since the Skolem IRIs can be identified by the start of their path ( .well-known/genid/ ), the same linked data assertions can be displayed as follows: feature:22238008e490f725979118f8f2dd9b5a geo:hasGeometry [ geo:asWKT 'Point(1.1 2.2)'^^geo:wktLiteral ].","title":"Example"},{"location":"triply-etl/transform/ratt/#addtag","text":"This middleware creates a language tag based on a given string value.","title":"addTag()"},{"location":"triply-etl/transform/ratt/#parameters_3","text":"content A string value that encodes a language tag according to the IANA language subtag registry. key A new key where the language tag will be stored.","title":"Parameters"},{"location":"triply-etl/transform/ratt/#throws","text":"An error is emitted if the given string value does not follow the language tag format, or denotes a language tag that is not currently registered.","title":"Throws"},{"location":"triply-etl/transform/ratt/#see-also_3","text":"The language tag format is defined in the IETF BCP 47 standard (RFC 5646) . Language tags are registered in the IANA language subtag registry .","title":"See also"},{"location":"triply-etl/transform/ratt/#example_1","text":"The following snippet creates a language tag for the Dutch language as spoken in The Netherlands, and uses it to assert a language-tagged string: fromJson([{ label: 'Amsterdam' }]), addTag({ content: 'nl-nl', key: 'lang', }), triple(iri(prefix.city, 'label'), rdfs.label, literal('label', 'lang')),","title":"Example"},{"location":"triply-etl/transform/ratt/#addvalue","text":"This middleware allows any value to be added to the Record.","title":"addValue()"},{"location":"triply-etl/transform/ratt/#description","text":"This middleware is useful for data that is not present in the source data record, but must be used in one or more assertions.","title":"Description"},{"location":"triply-etl/transform/ratt/#parameters_4","text":"content Any value that can be represented in TypeScript. key A new key where the value is stored.","title":"Parameters"},{"location":"triply-etl/transform/ratt/#example_2","text":"The following snippet starts out with an empty source record ( {} ), and adds a new data key to it. The added value is an array that contains a string and a number (in that order). This new value is used in the triple assertion, where 'data[0]' extracts the string element and 'data[1]' extracts the number element. fromJson([{}]), addValue({ content: ['johndoe', 22], key: 'data', }), triple(iri(prefix.person, 'data[0]'), foaf.age, 'data[1]'), This results in the following linked data assertion: person:johndoe foaf:age 22.","title":"Example"},{"location":"triply-etl/transform/ratt/#example_3","text":"The following snippet adds a key called _start that either contains the start date as specified in the data source record, or the value 'unknown' : fromJson([ { id: '123', start: '2022-02-12' }, { id: '456' }, ]), ifElse({ if: 'start', then: addLiteral({ content: 'start', datatype: xsd.date, key: '_start', }), }, { else: addValue({ content: 'unknown', key: '_start', }), }), triple(iri(prefix.event, 'id'), sdo.startDate, '_start'), This results in the following linked data assertions: event:123 sdo:startDate '2022-02-12'^^xsd:date. event:456 sdo:startDate 'unknown'.","title":"Example"},{"location":"triply-etl/transform/ratt/#capitalize","text":"Transforms a string value to its capitalized variant.
If the first character of a string has an uppercase variant, then that variant is used. If the first character does not have an uppercase variant -- because the character is already uppercase or is a punctuation character -- then the string remains unchanged. This transformation can uppercase the first character in any language; the Unicode Default Case Conversion algorithm is used.","title":"capitalize()"},{"location":"triply-etl/transform/ratt/#parameters_5","text":"content A key that contains a string value. key A new key where the capitalized result is stored.","title":"Parameters"},{"location":"triply-etl/transform/ratt/#example-class-iris","text":"According to convention, classes in linked data are denoted by IRIs whose local name starts with a capital letter. The following source data contains suitable values for the type key, but they do not start with a capital letter yet. The following snippet capitalizes the values of the type key, and uses them to create class IRIs. fromJson([ { id: '1', type: 'location' }, { id: '2', type: 'person' }, ]), capitalize({ content: 'type', key: '_type', }), triple(iri(prefix.id, 'id'), a, iri(prefix.def, '_type')), This results in the following linked data assertions: id:1 a def:Location. id:2 a def:Person.","title":"Example: Class IRIs"},{"location":"triply-etl/transform/ratt/#concat","text":"","title":"concat()"},{"location":"triply-etl/transform/ratt/#description_1","text":"Concatenates an array of strings into one new string. An optionally specified separator is placed in between every two consecutive string values.","title":"Description"},{"location":"triply-etl/transform/ratt/#parameters_6","text":"content An array of keys that contain strings and/or strings specified with assertion str() . separator Optionally, the string that is placed between every two consecutive string values. key A new key where the concatenated string is stored.","title":"Parameters"},{"location":"triply-etl/transform/ratt/#example_4","text":"The following snippet concatenates the first and last name of a person (in that order), using a space separator. fromJson([{ id: '1', first: 'John', last: 'Doe' }]), concat({ content: ['first', 'last'], separator: ' ', key: '_name', }), triple(iri(prefix.person, 'id'), foaf.name, '_name'), This results in the following linked data assertion: person:1 foaf:name 'John Doe'.","title":"Example"},{"location":"triply-etl/transform/ratt/#copy","text":"Makes a plain copy from the value stored in the given key to a new key.","title":"copy()"},{"location":"triply-etl/transform/ratt/#parameters_7","text":"content A value of any type, or a key that contains a value of any type. type Optionally, the name of the TypeScript type of the value. The default value is 'string' . key A new key where the plain copy is stored.","title":"Parameters"},{"location":"triply-etl/transform/ratt/#example_5","text":"Plain copies can be used to abbreviate long keys, especially in tree-shaped data like JSON or XML. In the following example, values stored in a long nested key are copied into a short and descriptive key. This is even more useful if the key is used many times in the script. copy({ content: 'record[0].family[0].children.child[0].id.$text', key: 'childId', }),","title":"Example"},{"location":"triply-etl/transform/ratt/#example_6","text":"Since plain copies introduce a new name for an existing value, the new name can be used to store extra information about the value.
The following example stores an English name, if available; or a Dutch name, if available; or no name at all. This is a relatively complex example that can only be accomplished by copying the names for the encountered languages under descriptive key names. fromJson([ { id: '1', names: [ { language: 'en', value: 'London' }, { language: 'fr', value: 'Londres' }, { language: 'nl', value: 'Londen' }, ], }, { id: '2', names: [ { language: 'fr', value: 'Paris' }, { language: 'nl', value: 'Parijs' }, ], }, ]), forEach('names', [ _switch('language', // Plain copy of the English label, if available. ['en', copy({ content: 'value', key: '$parent.en' })], // Plain copy of the Dutch label, if available. ['nl', copy({ content: 'value', key: '$parent.nl' })], ), ]), ifElse({ // Prefer an English label over a Dutch label. if: 'en', then: copy({ content: 'en', key: '_preferredName' }), }, { // If there is no English label, a Dutch label is a second-best option. if: 'nl', then: copy({ content: 'nl', key: '_preferredName' }), }), // If there is either an English or a Dutch label, assert it. when('_preferredName', [ triple(iri(prefix.city, 'id'), rdfs.label, '_preferredName'), ]), This results in the following linked data assertions: city:1 rdfs:label 'London'. city:2 rdfs:label 'Parijs'.","title":"Example"},{"location":"triply-etl/transform/ratt/#encodehtml","text":"","title":"decodeHtml()"},{"location":"triply-etl/transform/ratt/#description_2","text":"This transformation decodes any HTML entities that appear in a given key. The following HTML entities are common in source data: HTML entity Decoded &amp; & &gt; > &lt; < You do not need to use this transformation if you want to assert literals with datatype IRI rdf:HTML . HTML entities are meaningful in HTML, so they should be preserved there.","title":"Description"},{"location":"triply-etl/transform/ratt/#parameters_8","text":"content A key in the Record that contains string values with HTML entities. key A new key where the decoded content is stored.","title":"Parameters"},{"location":"triply-etl/transform/ratt/#example_7","text":"The following snippet takes HTML texts from the source data and asserts them as regular text literals. Since HTML entities are meaningless in regular text, decodeHtml() is used to decode these entities. fromJson([ { id: '1', label: 'A&amp;B' }, { id: '2', label: '1 &lt; 2' }, ]), decodeHtml({ content: 'label', key: '_label', }), triple(iri(prefix.id, 'id'), rdfs.label, '_label'), This results in the following linked data assertions: id:1 rdfs:label 'A&B'. id:2 rdfs:label '1 < 2'.","title":"Example"},{"location":"triply-etl/transform/ratt/#geojsontowkt","text":"Transforms GeoJSON objects to their corresponding Well-Known Text (WKT) serialization strings.","title":"geojsonToWkt()"},{"location":"triply-etl/transform/ratt/#parameters_9","text":"content A key that stores a GeoJSON object. crs Optionally, an IRI that denotes a Coordinate Reference System (CRS). You can use IRIs from the epsg object. If absent, uses https://epsg.io/4326 as the CRS. key A new key where the WKT serialization string is stored.","title":"Parameters"},{"location":"triply-etl/transform/ratt/#geojson-and-well-known-text-wkt","text":"According to the GeoJSON standard , the only Coordinate Reference System (CRS) that is allowed to be used is EPSG:4326/WGS84. In practice, source data sometimes (incorrectly) stores GeoJSON formatted data in other CRSes. An example of this is the GISCO dataset of the European Union, which uses the EPSG:3857 CRS.
For cases like these, the optional crs parameter comes in handy.","title":"GeoJSON and Well-Known Text (WKT)"},{"location":"triply-etl/transform/ratt/#see-also_4","text":"The GeoJSON format is standardized in RFC 7946 . The Well-Known Text (WKT) serialization format is standardized as part of ISO/IEC 13249-3:2016 standard .","title":"See also"},{"location":"triply-etl/transform/ratt/#example_8","text":"The following snippet converts GeoJSON objects that denote traffic light locations to their GeoSPARQL representation. fromJson([ { id: '123', geometry: { type: 'Point', coordinates: [6.256, 48.480], }, }, ]), addIri({ prefix: prefix.feature, content: 'id', key: '_feature', }), geojsonToWkt({ content: 'geometry', crs: epsg[3857], key: '_wkt', }), addHashedIri({ prefix: prefix.geometry, content: '_wkt', key: '_geometry' }), pairs('_feature', [a, def.TrafficLight], [geo.hasGeometry, '_geometry'], ), pairs('_geometry', [a, geo.Geometry], [geo.asWKT, literal('_wkt', geo.wktLiteral)], ), This results in the following linked data assertions: feature:123 a def:TrafficLight; geo:hasGeometry geometry:197e6376c2bd8192c24911f88c330606. geometry:197e6376c2bd8192c24911f88c330606 a geo:Geometry; geo:asWKT 'Point(6.256 48.480)'^^geo:wktLiteral. Or diagrammatically: graph LR feature -- a --> TrafficLight feature -- geo:hasGeometry --> geometry geometry -- a --> Geometry geometry -- geo:asWKT --> wkt Geometry[geo:Geometry]:::model TrafficLight[def:TrafficLight]:::model feature(feature:123):::data geometry(geometry:197e6376c2bd8192c24911f88c330606):::data wkt(\"'Point(6.256 48.480)'^^geo:wktLiteral\"):::data","title":"Example"},{"location":"triply-etl/transform/ratt/#jpath","text":"","title":"jpath()"},{"location":"triply-etl/transform/ratt/#description_3","text":"Filters a value based on a JSON Path expression. JSON Path is a query language for JSON. For the syntax of JSON Path expressions, please visit the JSON Path documentation page .","title":"Description"},{"location":"triply-etl/transform/ratt/#use-cases","text":"This function simplifies the complex key specification to filter specific values. It can only be used for an object of a triple to create a literal. 
The result of a function must have a fundamental type.","title":"Use cases"},{"location":"triply-etl/transform/ratt/#parameters_10","text":"value A JSON Path expression.","title":"Parameters"},{"location":"triply-etl/transform/ratt/#example_9","text":"The following examples will create a literal based on key value : If key 'ISO_639-2' exists: fromJson({ language: [ { 'ISO_639-1': 'en', lcid: 2057, value: 'Paris' }, { 'ISO_639-1': 'nl', 'ISO_639-2': 'nld', lcid: 1043, value: 'Parijs' }, ], }), triple( iri(prefix.city, '$recordId'), rdfs.label, literal(jpath(\"$.language[?(@['ISO_639-2'])].value\"), language.nl) ), If key 'ISO_639-1' is equal to nl : fromJson({ language: [ { 'ISO_639-1': 'en', lcid: 2057, value: 'Paris' }, { 'ISO_639-1': 'nl', 'ISO_639-2': 'nld', lcid: 1043, value: 'Parijs' }, ], }), triple( iri(prefix.city, '$recordId'), rdfs.label, literal(jpath(\"$.language[?(@['ISO_639-1'] =='nl')].value\"), language.nl) ), If key 'lcid' is lower than 1,100: fromJson({ language: [ { 'ISO_639-1': 'en', lcid: 2057, value: 'Paris' }, { 'ISO_639-1': 'nl', 'ISO_639-2': 'nld', lcid: 1043, value: 'Parijs' }, ], }), triple( iri(prefix.city, '$recordId'), rdfs.label, literal(jpath('$.language[?(@.lcid < 1100)].value'), language.nl) ), All three examples generate the following linked data: record:1 rdfs:label 'Parijs'@nl.","title":"Example"},{"location":"triply-etl/transform/ratt/#lowercase","text":"","title":"lowercase()"},{"location":"triply-etl/transform/ratt/#description_4","text":"Translates a string value to its lowercase variant. This middleware can lowercase strings in any language; the Unicode Default Case Conversion algorithm is used.","title":"Description"},{"location":"triply-etl/transform/ratt/#use-cases_1","text":"Older data formats sometimes use uppercase letters for header names or codes. The lowercase transformation middleware may be used to change such string values to lowercase.","title":"Use cases"},{"location":"triply-etl/transform/ratt/#parameters_11","text":"content A key that contains a string value. key A new key where the lowercase variant is stored.","title":"Parameters"},{"location":"triply-etl/transform/ratt/#example_10","text":"The following snippet starts out with header values that use uppercase characters exclusively. The lowerCase transformation is used to create lowercase names that can be used to create property IRIs. fromJson([ { from: '1', rel: 'PARENT', to: '2' }, { from: '2', rel: 'CHILD', to: '1' }, ]), lowercase({ content: 'rel', key: '_relationship', }), triple( iri(prefix.id, 'from'), iri(prefix.def, '_relationship'), iri(prefix.id, 'to'), ), This results in the following linked data assertions: id:1 def:parent id:2. id:2 def:child id:1.","title":"Example"},{"location":"triply-etl/transform/ratt/#padend","text":"","title":"padEnd()"},{"location":"triply-etl/transform/ratt/#description_5","text":"Adds a given padding string zero or more times to the end of a string value, until the resulting string value is exactly a given number of characters long.","title":"Description"},{"location":"triply-etl/transform/ratt/#use-cases_2","text":"This transformation is useful for identifiers that must have fixed length and that may be suffixed by zero's.","title":"Use cases"},{"location":"triply-etl/transform/ratt/#parameters_12","text":"content A key that contains a string value. If the key contains a numeric value, that value is first cast to string. 
padString The string that is added to the end of the string value in key content , until the result string has exactly targetLength characters. Can be a static string or a key. targetLength The exact number of characters that the resulting string should have. The string value is copied over as-is when targetLength is smaller than or equal to the length of the string value in key content . This includes cases where targetLength is negative or zero. key A new key where the padded string is stored.","title":"Parameters"},{"location":"triply-etl/transform/ratt/#example_11","text":"The following snippet processes identifiers of varying length, and ensures that they have the same length after suffixing '0' characters. fromJson([ { id: '16784' }, { id: '129' }, ]), padEnd({ content: 'id', padString: '0', targetLength: 6, key: '_id', }), This results in the following two Records: [ { \"id\": \"16784\", \"_id\": \"167840\" }, { \"id\": \"129\", \"_id\": \"129000\" } ]","title":"Example"},{"location":"triply-etl/transform/ratt/#padstart","text":"","title":"padStart()"},{"location":"triply-etl/transform/ratt/#description_6","text":"Adds a given padding string zero or more times in front of a string value, until the resulting string value is exactly a given number of characters long.","title":"Description"},{"location":"triply-etl/transform/ratt/#use-cases_3","text":"This transformation is useful for identifiers that must have fixed length and that may be prepended by zero's. If key content contains a numeric value, then that value is first cast to string. content A key that contains a string value. padString The string that is added in front of the string value in key content , until the result string has exactly targetLength characters. targetLength The exact number of characters that the resulting string should have. The string value is copied over as-is when targetLength is smaller than or equal to the length of the string value in key content . This includes cases where targetLength is negative or zero. key A new key where the lowercased string is stored.","title":"Use cases"},{"location":"triply-etl/transform/ratt/#example-fixed-length-identifiers","text":"The following snippet processes identifiers of varying length, and ensures that they have the same length after prefixing '0' characters. fromJson([ { id: '16784' }, { id: '129' }, ]), padStart({ content: 'id', padString: '0', targetLength: 6, key: '_id', }), This results in the following two records: [ { \"id\": \"16784\", \"_id\": \"016784\" }, { \"id\": \"129\", \"_id\": \"000129\" } ]","title":"Example: Fixed-length identifiers"},{"location":"triply-etl/transform/ratt/#example-create-year-literals","text":"In order to create standards-conforming temporal literal, we need to pad the year component to be at least 4 decimal digits long. (This requirement is defined in the XML Schema Datatypes 1.1: Part 2 Datatypes standard.) Suppose that the source data looks as follows: Artifact Year 0001 612 0002 1702 We can ensure that all years have at least 4 decimal digits by calling the following function: padStart({ content: 'Year', padString: '0', targetLength: 4, key: '_lexicalForm', }), triple( iri(prefix.id, 'Artifact'), dct.created, literal('_lexicalForm', xsd.gYear), ), This makes the following linked data assertions: id:0001 dct:created '0612'^^xsd:gYear. 
id:0002 dct:created '1702'^^xsd:gYear.","title":"Example: Create year literals"},{"location":"triply-etl/transform/ratt/#replace","text":"","title":"replace()"},{"location":"triply-etl/transform/ratt/#description_7","text":"Performs a regular expression replacement to the given input string, and stores the result in a new key.","title":"Description"},{"location":"triply-etl/transform/ratt/#parameters_13","text":"content A key that contains a string value, or a static string specified with assertion str() . from A JavaScript Regular Expression . to Optionally, a string that replaces potential matches of the Regular Expression ( from ). Use $1 , $2 , etc. to insert matches. If absent, the empty string is used. key A new key where the result of the replacement is stored.","title":"Parameters"},{"location":"triply-etl/transform/ratt/#example_12","text":"Suppose the source data contains date/time strings, but only the date component is needed: { \"created\": \"2020-01-02T00:00:00.0Z\" } It is possible to extract only the date part (everything up to the T ) in the following way: replace({ content: 'created', from: /^([^T]*).*$/, to: '$1', key: '_created', }), triple('_creativeWork', dct.created, literal('_created', xsd.date)), This results in the following Record: { \"created\": \"2020-01-02T00:00:00.0Z\", \"_created\": \"2020-01-02\" }","title":"Example"},{"location":"triply-etl/transform/ratt/#split","text":"","title":"split()"},{"location":"triply-etl/transform/ratt/#description_8","text":"Splits a string into an array of strings, and stores that array in a new key.","title":"Description"},{"location":"triply-etl/transform/ratt/#whitespace-handling","text":"This transformation removes any trailing whitespace that remains after the strings are split. This ensures that irregular use of whitespace in the source data is taken care of automatically.","title":"Whitespace handling"},{"location":"triply-etl/transform/ratt/#empty-values","text":"This transformation removes any elements of the splitted string that are empty (after trimmimng). To keep empty entries, use the `` flag.","title":"Empty values"},{"location":"triply-etl/transform/ratt/#use-cases_4","text":"The transformation is used when: - Tabular source data encodes multiple values inside singular cells. (Such concatenated storage inside cells is a data quality issue, because the table format cannot guarantee that the separator character does not (accidentally) occur inside individual values inside a cell. Tree-shaped source formats are able to store multiple values for the same key reliably, e.g. JSON and XML.) - Source data contains complex string values that can be decomposed into stand-alone components with distinct meaning.","title":"Use cases"},{"location":"triply-etl/transform/ratt/#parameters_14","text":"content A key that stores a string, or a string specified with assertion str() . separator A string or a regular expression that is used to separate the content. key A new key where the array of split strings is stored. keepEmptyEntities A boolean flag indicating if the empty values of a splitted string should be kept or not. By default empty values are removed.","title":"Parameters"},{"location":"triply-etl/transform/ratt/#example-multiple-values-in-singular-table-cells","text":"Tabular formats are unable to store more than one value in a cell. Because of this limitation, tabular data sources sometimes encode multiple values in cells by encoding them into one string. 
A separator character is typically used to distinguish between these multiple values. Suppose that the source data looks as follows: Parent Child John Jane, Jake , Kate ,, The following transformation splits the cells that encode zero or more children for each parent: split({ content: 'Child', separator: ',', key: 'Children', }), This results in the following transformed records: { \"Parent\": \"John\", \"Child\": \"Jane, Jake , \", \"Children\": [ \"Jane\", \"Jake\" ] } { \"Parent\": \"Kate\", \"Child\": \",, \", \"Children\": [] } Notice that trailing whitespace and empty values are dealt with automatically. Since the split() transformation always results in an array of strings, we can use the term assertion iris() afterwards: split({ content: 'children', separator: ',', key: '_children', }), triple( iri(prefix.person, 'parent'), sdo.children, iris(prefix.person, '_children') ), This results in the following linked data assertions: person:johndoe sdo:children person:janedoe, person:jakedoe.","title":"Example: Multiple values in singular table cells"},{"location":"triply-etl/transform/ratt/#example-split-a-complex-string-into-components","text":"The following snippet uses a regular expression to split a KIX code. (A KIX code is a standardized format for representing postal addresses in The Netherlands.) fromJson([{ id: '1', KIX: '1231FZ13Xhs' }]), split({ content: 'KIX', separator: /^(\\d{4}[A-Z]{2})(\\d{1,5})(?:X(.{1,6}))/, key: 'KIX_components', }), triple(iri(prefix.id, 'id'), sdo.postalCode, 'KIX_components[1]'), This results in the following record: { \"id\": \"1\", \"KIX\": \"1231FZ13Xhs\", \"KIX_components\": [\"\", \"1231FZ\", \"13\", \"hs\", \"\"] } And in the following linked data assertion: id:1 sdo:postalCode '1231FZ'.","title":"Example: Split a complex string into components"},{"location":"triply-etl/transform/ratt/#substring","text":"","title":"substring()"},{"location":"triply-etl/transform/ratt/#description_9","text":"This middleware takes a substring from the input string and stores the result in a new key.","title":"Description"},{"location":"triply-etl/transform/ratt/#parameters_15","text":"content A key that stores a string value, or a string specified with assertion str() . start The index of the first character that is included in the substring. The first character has index 0. end Optionally, the index of the first character that is excluded from the substring. If absent, the substring ends at the end of the source string. key The new key in which the substring is stored.","title":"Parameters"},{"location":"triply-etl/transform/ratt/#example_13","text":"The Library of Congress MARC format stores the type of record in the sixth character that appears in the leader key. We use substring() to extract this character, and then use transformation translateAll() to map them to a corresponding class IRI: substring({ content: 'metadata.marc:record.marc:leader.$text', start: 6, end: 7, key: '_typeOfRecord', }), translateAll({ content: '_typeOfRecord', table: { a: dcm.Text, k: dcm.StillImage, }, key: '_class', }), triple('_iri', a, '_class'),","title":"Example"},{"location":"triply-etl/transform/ratt/#translateall","text":"","title":"translateAll()"},{"location":"triply-etl/transform/ratt/#description_10","text":"Translates all dynamic strings from a specific key to new values of an arbitrary type To , according to a specified translation table. Since this function translates all values, the mapped values can have any type T ; they do not need to be strings. 
For example, this allows strings to be translated to IRIs or to literals.","title":"Description"},{"location":"triply-etl/transform/ratt/#when-to-use_3","text":"This approach is used when: The set of source data values is small. The set of source data values is known ahead of time. The corresponding linked data terms are known ahead of time. The appearance of a new value is considered to be an error in the source data.","title":"When to use?"},{"location":"triply-etl/transform/ratt/#parameters_16","text":"content A key that contains a string value. table A translation table from strings to values of some arbitrary type T . nulls Optionally, a list of string values that are considered denote NULL values in the source data. When a NULL value is encountered, the special value undefined is added for the target key . default Optionally, a default value or a default value-determining function that is used for string values that are neither in the translation table ( table ) nor in the NULL values list ( nulls ). The function must return a value of type T . Use of a default value value is equivalent to using the following value-determining function: _ => value . key A new key where the results of the translation are stored.","title":"Parameters"},{"location":"triply-etl/transform/ratt/#example-map-source-data-to-iri-values","text":"Suppose that source data contains country names. In linked data we want to use IRIs to denote countries, so that we can link additional information. Since the list of countries that appears in the source data is not that long, we can specify a translation table from names to IRIs by hand: change.translateAll({ content: 'country', table: { 'Belgium': country.be, 'Germany': country.de, 'England': country.gb, ..., }, nulls: ['Unknown'], key: '_country', }), when('country', [ triple('_country', a, sdo.Country), ]),","title":"Example: Map source data to IRI values"},{"location":"triply-etl/transform/ratt/#example-map-source-data-to-iri-properties","text":"When we relate a creative work to its creator, we sometimes know whether the creator was the actor, architect, author, etc. of the creative work. But in other cases we only know that there is a generic creator relationship. The Library of Congress Relators vocabulary allows us to express specific and generic predicates of this kind. transform.translateAll({ table: { 'actor': rel.act, 'architect': rel.arc, 'author': rel.aut, ..., }, default: rel.oth, // generic relator key: '_relator', }), triple('_creativeWork', '_relator', '_creator'),","title":"Example: Map source data to IRI properties"},{"location":"triply-etl/transform/ratt/#translatesome","text":"","title":"translateSome()"},{"location":"triply-etl/transform/ratt/#description_11","text":"Translates some strings, according to the specified translation table, to other strings. Strings that are not translated according to the translation table are copied over as-is.","title":"Description"},{"location":"triply-etl/transform/ratt/#parameters_17","text":"content A key that contains a string value. table A translation table that specifies translations from strings to strings. key A new key where the translated strings are stored.","title":"Parameters"},{"location":"triply-etl/transform/ratt/#use-cases_5","text":"Source data often contains some strings that are correct and some that are incorrect. For example, if source data contains a key with city names, some of the names may be misspelled. 
In such cases, translateSome() can be used to translate the incorrect strings into correct ones. A translateSome() transformation is often performed directly before a translateAll() transformation. The former ensures that all string values are correct (e.g. fixing typo's in city names); the latter ensures that all strings are mapped onto IRIs (e.g. city names mapped onto city-denoting IRIs).","title":"Use cases"},{"location":"triply-etl/transform/ratt/#example_14","text":"The following example fixes an encoding issue that occurs in the source data: transform.translateSome({ content: 'name', table: { 'Frysl\ufffd\ufffdn': 'Frysl\u00e2n', // Other entries for typographic fixes go here. ..., }, key: '_name', }),","title":"Example"},{"location":"triply-etl/transform/ratt/#tryliteral","text":"","title":"tryLiteral()"},{"location":"triply-etl/transform/ratt/#description_12","text":"This transformation is used when string values must be mapped onto literals with varying datatype IRIs. The datatype IRIs that could apply are specified in a list. The specified datatype IRIs are tried out from left to right. The first datatype IRI that results in a valid literal is chosen. content A key that contains a string value, or a string value specified with assertion str() . datatypes An array of two or more datatype IRIs. key A new key where the created literal is stored.","title":"Description"},{"location":"triply-etl/transform/ratt/#throws_1","text":"An exception is emitted if a string value does not belong to the lexical space of any of the specified datatype IRIs.","title":"Throws"},{"location":"triply-etl/transform/ratt/#example_15","text":"A literal is valid if the given string value appears in the lexical space of a specific datatype IRI. This is best explained with an example: tryLiteral({ content: 'date', datatypes: [xsd.date, xsd.gYearMonth, xsd.gYear], key: '_publicationDate', }), Source data in key 'date' Result in key '_date' '1900-01-02' '1900-01-02'^^xsd:date '1900' '1900'^^xsd:gYear '02-01-1900' An error is emitted. If we do not want to emit errors for string values that cannot be satisfy any of the specified datatype IRIs, we may choose to include xsd.string as the last datatype IRI in the list. Do notice however that this will result in dates that cannot be compared on a timeline, since they were not transformed to an XSD date/time datatype.","title":"Example"},{"location":"triply-etl/transform/ratt/#see-also_5","text":"You only need to use tryLiteral() if the datatype IRI varies from record to record. If the datatype IRI is the same for every record, then the regular assertion function literal() should be used instead.","title":"See also"},{"location":"triply-etl/transform/ratt/#uppercase","text":"","title":"uppercase()"},{"location":"triply-etl/transform/ratt/#description_13","text":"Translates a string value to its uppercase variant. This middleware can uppercase strings in any language; the Unicode Default Case Conversion algorithm is used for this.","title":"Description"},{"location":"triply-etl/transform/ratt/#parameters_18","text":"content A key that contains a string value. 
key A new key where the uppercase variant is stored.","title":"Parameters"},{"location":"triply-etl/transform/ratt/#example_16","text":"In the following example, the string in the key 'countryCode' becomes the uppercase string: fromJson({ place: 'Amsterdam', countryCode: 'nl' }), uppercase({ content: 'countryCode', key: '_countryCode' }), triple(iri(prefix.id, 'place'), iri(prefix.geonames, str('countryCode')), '_countryCode') This results in the following linked data assertion: city:Amsterdam geonames:countryCode \"NL\"","title":"Example"},{"location":"triply-etl/transform/ratt/#wktAddPoint()","text":"","title":"wkt.addPoint()"},{"location":"triply-etl/transform/ratt/#description_14","text":"Creates a Well-Known Text (WKT) serialization string from the corresponding geospatial point.","title":"Description"},{"location":"triply-etl/transform/ratt/#parameters_19","text":"latitude A key or a string assertion ( str() ) with latitude. longitude A key or a string assertion ( str() ) with longitude. crs Optionally, an IRI that denotes a Coordinate Reference System (CRS). You can use IRIs from the epsg object. If absent, uses EPSG:4326/WGS84 as the CRS. key A new key where the WKT string is stored.","title":"Parameters"},{"location":"triply-etl/transform/ratt/#example_17","text":"The following example creates a WKT literal from the geo coordinates of Amsterdam: fromJson({ place: 'Amsterdam', lat: 52.37308, long: 4.89245 }), wkt.addPoint({ latitude: 'lat', longitude: 'long', key: '_point' }), triple(iri(prefix.city, 'place'), geo.asWKT, '_point'), This results in the following record of the key '_point' : { \"_point\": { \"termType\": \"Literal\", \"value\": \"Point (52.37308 4.89245)\", \"language\": \"\", \"datatype\": { \"termType\": \"NamedNode\", \"value\": \"http://www.opengis.net/ont/geosparql#wktLiteral\", \"validationStatus\": \"canonical\" }, \"validationStatus\": \"canonical\" } } And in the following linked data assertion: city:Amstedam geo:asWKT \"Point (52.37308 4.89245)\"^^geo:wktLiteral","title":"Example"},{"location":"triply-etl/transform/ratt/#wktProject()","text":"","title":"wkt.project()"},{"location":"triply-etl/transform/ratt/#description_15","text":"Converts the projection of a Well-Known Text (WKT) literal from one Coordinate Reference System to another one.","title":"Description"},{"location":"triply-etl/transform/ratt/#parameters_20","text":"content An array of keys or numbers. key A new key where the new projection is stored. fromCrs : an IRI that denotes a Coordinate Reference System (CRS) of the content . toCrs : Optionally, an IRI that denotes a Coordinate Reference System (CRS) we want to convert to. If absent, uses EPSG:4326/WGS84 as the CRS.","title":"Parameters"},{"location":"triply-etl/transform/ratt/#example_18","text":"The following example converts an array with latitude and longitude in content key from Dutch grid coordinates (Rijksdriehoeks-coordinates) to WGS84 coordinates. 
fromJson({ place: 'Amsterdam', lat: 121307, long: 487360 }), wkt.project({ content: ['lat', 'long'], key: '_coordinates', fromCrs: epsg[666], toCrs: epsg[4326] }), This results in the following record of the key '_coordinates' : { \"_coordinates\": [ 52.374671935135474, 4.892803721020475 ] } We can now use the converted result to create a WKT Point() using addPoint() : wkt.addPoint({ latitude: '_coordinates[0]', longitude: '_coordinates[1]', key: '_point' }), triple(iri(prefix.id, 'place'), geo.asWKT, '_point') This code snippet creates the following linked data assertion: city:Amsterdam geo:asWKT \"Point (52.374671935135474 4.892803721020475)\"^^geo:wktLiteral","title":"Example"},{"location":"triply-etl/transform/rml/","text":"On this page: RML Transformations Configuration components A simple example RML Transformations \u00b6 The RDF Mapping Language (RML) is an ETL configuration language for linked data. RML mappings are applied to the TriplyETL Record. Configuration components \u00b6 RML mappings contain the following configuration components: Logical Source : Defines the source of the data to be transformed. It includes information about the data format (csv, xml, json, etc.), location, and access methods. Logical sources can represent various types of data sources, such as files, databases, or web services. Triples Map : The mapping rules that are used to convert data from a Logical Source to linked data. It defines how data should be transformed and specifies the subject, predicate and object terms of the generated statements. Subject Map : The part of the Triples Map that defines how the subjects of the generated linked data statements must be constructed. It specifies the subject's term type, which can be a blank node or an IRI. It often includes the class of which the subject term is an instance. Predicate Object Map : The part of the Triples Map that defines how the predicate and objects are mapped. A simple example \u00b6 The following full TriplyETL script applies the RML mappings specified in map.trig to the in-line specified source data record: import { logQuads } from '@triplyetl/etl/debug' import { Etl, fromJson, Source } from '@triplyetl/etl/generic' import { map } from '@triplyetl/etl/rml' export default async function (): Promise { const etl = new Etl() etl.use( fromJson([{ name: 'John' }]), map(Source.file('map.trig')), logQuads(), ) return etl } The contents of file map.trig specify how the data will be mapped: prefix ql: prefix rml: prefix rr: prefix sdo: [] rml:logicalSource [ rml:source '$Record.json'; rml:referenceFormulation ql:JSONPath; rml:iterator '$' ]; rr:subjectMap [ rr:termType rr:BlankNode; rr:class sdo:Person ]; rr:predicateObjectMap [ rr:predicate sdo:firstName; rr:objectMap [ rml:reference 'name' ] ]. The Logical Source component specifies that the TriplyETL Record should be used: [] rml:logicalSource [ rml:source '$Record.json'; rml:referenceFormulation ql:JSONPath; rml:iterator '$' ]; The Subject Map specifies that the subject term is a blank node that is an instance of class sdo:Person : [] rr:subjectMap [ rr:termType rr:BlankNode; rr:class sdo:Person ]; The Predicate Object Map specifies that the value of key 'name' should be used together with the property sdo:firstName : [] rr:predicateObjectMap [ rr:predicate sdo:firstName; rr:objectMap [ rml:reference 'name' ] ]. 
Running the TriplyETL script results in the following linked data: a sdo:Person; sdo:firstName 'John'.","title":"RML"},{"location":"triply-etl/transform/rml/#rml-transformations","text":"The RDF Mapping Language (RML) is an ETL configuration language for linked data. RML mappings are applied to the TriplyETL Record.","title":"RML Transformations"},{"location":"triply-etl/transform/rml/#configuration-components","text":"RML mappings contain the following configuration components: Logical Source : Defines the source of the data to be transformed. It includes information about the data format (csv, xml, json, etc.), location, and access methods. Logical sources can represent various types of data sources, such as files, databases, or web services. Triples Map : The mapping rules that are used to convert data from a Logical Source to linked data. It defines how data should be transformed and specifies the subject, predicate and object terms of the generated statements. Subject Map : The part of the Triples Map that defines how the subjects of the generated linked data statements must be constructed. It specifies the subject's term type, which can be blank node, IRI or literal. It often includes the class of which the subject term is an instance. Predicate Object Map : The part of the Triples Map that defines how the predicate and objects are mapped.","title":"Configuration components"},{"location":"triply-etl/transform/rml/#a-simple-example","text":"The following full TriplyETL script applies the RML mappings specified in map.trig to the in-line specified source data record: import { logQuads } from '@triplyetl/etl/debug' import { Etl, fromJson, Source } from '@triplyetl/etl/generic' import { map } from '@triplyetl/etl/rml' export default async function (): Promise { const etl = new Etl() etl.use( fromJson([{ name: 'John' }]), map(Source.file('map.trig')), logQuads(), ) return etl } The contents of file map.trig specify how the data will be mapped: prefix ql: prefix rml: prefix rr: prefix sdo: [] rml:logicalSource [ rml:source '$Record.json'; rml:referenceFormulation ql:JSONPath; rml:iterator '$' ]; rr:subjectMap [ rr:termType rr:BlankNode; rr:class sdo:Person ]; rr:predicateObjectMap [ rr:predicate sdo:firstName; rr:objectMap [ rml:reference 'name' ] ]. The Logical Source component specifies that the TriplyETL Record should be used: [] rml:logicalSource [ rml:source '$Record.json'; rml:referenceFormulation ql:JSONPath; rml:iterator '$' ]; The Subject Map specifies that the subject term is a blank node that is an instance of class sdo:Person : [] rr:subjectMap [ rr:termType rr:BlankNode; rr:class sdo:Person ]; The Predicate Object Map specifies that the value of key 'name' should be used together with the property sdo:firstName : [] rr:predicateObjectMap [ rr:predicate sdo:firstName; rr:objectMap [ rml:reference 'name' ] ]. Running the TriplyETL script results in the following linked data: a sdo:Person; sdo:firstName 'John'.","title":"A simple example"},{"location":"triply-etl/transform/typescript/","text":"On this page: TypeScript Context Function custom.add() Function signature Error conditions See also Example: Numeric calculations Function custom.change() Function signature Error conditions Example: Numeric calculation Example: Cast numeric data Example: Variant type Example: String or object custom.replace() Function signature Error conditions See also TypeScript \u00b6 The vast majority of ETLs can be written with the core set of RATT Transformations . 
But sometimes a custom transformation is necessary that cannot be handled by this core set. For such circumstances, TriplyETL allows a custom TypeScript function to be written. Notice that the use of a custom TypeScript function should be somewhat uncommon. The vast majority of real-world transformations should be supported by the core set of RATT Transformations. Context \u00b6 Custom TypeScript functions have access to various resources inside the TriplyETL. These resources include, but are not limited to, the full Record and the full Internal Store. TriplyETL refers to these resources as the Context . context.app The TriplyETL pipeline object. context.getX Retrieves the value of a specific key in the Record and assumes it has type X , e.g. getAny() , getNumber() , getString() . context.record The current Record. context.store The Internal Store. Function custom.add() \u00b6 Adds a new entry to the Record, based on more than one existing entry. The value of the entry is the result of an arbitrary TypeScript function that has access to the full Context . Function signature \u00b6 The custom.add function has the following signature: etl.use( custom.add({ value: context => FUNCTION_BODY, key: 'NEW_KEY', }), ) The function can be configured in the following ways: - FUNCTION_BODY the body of a function, taking the Context as its input parameter ( context) and ending with a return statement returning the newly added value. - NEW_KEY must be the name of a new entry in the Record. Error conditions \u00b6 This function emits an error if NEW_KEY already exists in the current Record. See also \u00b6 Notice that it is bad practice to use custom.add() for adding a new entry that is based on exactly one existing entry. In such cases, the function custom.copy() should be used instead. Example: Numeric calculations \u00b6 Suppose the source data contains a numeric balance and a numeric rate. We can use function custom.add() to calculate the interest and store it in a new key: import { Etl, fromJson } from '@triplyetl/etl/generic' import { custom, logRecord } from '@triplyetl/etl/ratt' export default async function (): Promise { const etl = new Etl() etl.use( fromJson([ { balance: 100, rate: 0.1 }, { balance: 200, rate: 0.2 } ]), custom.add({ value: context => context.getNumber('balance') * context.getNumber('rate'), key: 'interest', }), logRecord(), ) return etl } This prints the following two records: { \"balance\": 100, \"rate\": 0.1, \"interest\": 10 } { \"balance\": 200, \"rate\": 0.2, \"interest\": 40 } Function custom.change() \u00b6 Changes an existing entry in the Record. The change function takes the old value and returns the new value. Function signature \u00b6 This function has the following signature: etl.use( custom.change({ key: 'KEY_NAME', type: 'VALUE_TYPE', change: value => FUNCTION_BODY, }), ) The function can be configured in the following way: - KEY_NAME must be the name of a key in the record. - VALUE_TYPE must be one of the following type-denoting strings: - 'array' an array whose elements have type any . - 'boolean' a Boolean value ( true or false ). - 'iri' a universal identifier / IRI term. - 'literal' an RDF literal term. - 'number' a natural number or floating-point number. - 'string' a sequence of characters. - 'unknown' an unknown type. - FUNCTION_BODY a function body that returns the new value. Error conditions \u00b6 This function emits an error if the specified key ( KEY_NAME ) does not exist in the RATT record. Use custom.copy() if you want to create a new entry based on an existing one. 
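As a rough illustration of that alternative, the following minimal sketch copies the value of the existing 'balance' key into a new '_balance' key with custom.copy(). The parameter names fromKey, toKey, and type are assumptions modelled on the custom.replace() signature documented further down this page; consult the custom.copy() reference for the exact signature. import { Etl, fromJson } from '@triplyetl/etl/generic' import { custom, logRecord } from '@triplyetl/etl/ratt' export default async function (): Promise<Etl> { const etl = new Etl() etl.use( fromJson([{ balance: 100 }]), // Assumed signature: copy the value of 'balance' into the new key '_balance'. custom.copy({ fromKey: 'balance', toKey: '_balance', type: 'number' }), logRecord(), ) return etl } 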
Example: Numeric calculation \u00b6 Suppose the source data contains a balance in thousands. We can use function custom.change() to multiply the balance inplace: import { Etl, fromJson } from '@triplyetl/etl/generic' import { custom, logRecord } from '@triplyetl/etl/ratt' export default async function (): Promise { const etl = new Etl() etl.use( fromJson([{ balance: 100 }, { balance: 200 }]), custom.change({ change: value => 1_000 * value, type: 'number', key: 'balance', }), logRecord(), ) return etl } This prints the following two records: { \"balance\": 100000 } { \"balance\": 200000 } Notice that the values for the balance keys were changed. Example: Cast numeric data \u00b6 Some source data formats are unable to represent numeric data. A good example are the CSV and TSV formats, where every cell value is represented as a string. If such a source data format that cannot represent numeric data is used, it is often useful to explicitly cast string values to numbers. For example, assume the following input table that uses strings to encode the number of inhabitants for each country: Country Inhabitants France '' Germany '83190556' Italy 'empty' Netherlands '17650200' We can use the custom.change() function to cast the values stored in the 'Inhabitants' key to numbers in the following way: custom.change({ change: value => +(value as number), type: 'unknown', key: 'Inhabitants', }), Notice that the type must be set to 'unknown' because a string is not allowed to be cast to a number in TypeScript (because not every string can be cast to a number). After custom.change() has been applied, the record looks as follows: Country Inhabitants France 0 Germany 83190556 Italy null Netherlands 17650200 Notice that strings that encode a number are correctly transformed, and non-empty strings that do not encode a number are transformed to null . Most of the time, this is the behavior that you want in a linked data pipeline. Also notice that the empty string is cast to the number zero. Most of the time, this is not what you want. If you want to prevent this transformation from happening, and you almost certainly do, you must process the source data conditionally, using control structures . Example: Variant type \u00b6 A variant is a value that does not always have the same type. Variants may appear in dirty source data, where a value is sometimes given in one way and sometimes in another. In such cases, the type parameter must be set to 'unknown' . Inside the body of the change function we first cast the value to a variant type. In TypeScript the notation for this is a sequence of types separated by the pipe ( | ) character. Finally, the typeof operator is used to clean the source data to a uniform type that is easier to process in the rest of the ETL. 
The following code snippet processes source data where the balance is sometimes specified as a number and sometimes as a string: import { Etl, fromJson } from '@triplyetl/etl/generic' import { custom, logRecord } from '@triplyetl/etl/ratt' export default async function (): Promise { const etl = new Etl() etl.use( fromJson([{ balance: 100 }, { balance: \"200\" }]), custom.change({ key: 'balance', type: 'unknown', change: value => { const tmp = value as number | string switch (typeof tmp) { case 'number': return value as number case 'string': return parseInt(value as string) } }, }), logRecord(), ) return etl } This prints the following two records, where the balance is now always a number that can be uniformly processed: { \"balance\": 100 } { \"balance\": 200 } Example: String or object \u00b6 In the following example the name of a person is sometimes given as a plain string and sometimes as an object with a firstName and a lastName key. The following function transforms this variant to a uniform string type: import { Etl, fromJson } from '@triplyetl/etl/generic' import { custom, logRecord } from '@triplyetl/etl/ratt' export default async function (): Promise { const etl = new Etl() etl.use( fromJson([ { name: 'John Doe' }, { name: { firstName: 'Jane', lastName: 'Doe' } } ]), custom.change({ key: 'name', type: 'unknown', change: value => { const tmp = value as { firstName: string, lastName: string } | string switch (typeof tmp) { case 'string': return tmp case 'object': return tmp.firstName + ' ' + tmp.lastName } }, }), logRecord(), ) return etl } This prints the following two records that can now be uniformly processed: { \"name\": \"John Doe\" } { \"name\": \"Jane Doe\" } custom.replace() \u00b6 Replaces the value of an existing key based on the value from another key. Function signature \u00b6 The custom.replace() function has the following signature: etl.use( custom.replace({ fromKey: 'FROM_KEY', type: 'VALUE_TYPE', change?: value => FUNCTION_BODY, toKey: 'TO_KEY', }), ) fromKey is the name of the key whose value is used to replace the old value. type is the name of the type of the value in fromKey . The change key optionally specifies a function that takes the cast value of fromKey and that returns the value that will be stored in toKey . If the change function is not specified, it is identical to value => value . toKey is the name of the existing key whose value is going to be replaced. Error conditions \u00b6 This function emits an error under the following conditions: - fromKey does not specify a key name that exists in the current Record. - toKey does not specify a key name that exists in the current Record. - fromKey and toKey are the same. See also \u00b6 If fromKey and toKey are the same, then function custom.change() must be used instead.","title":"TypeScript"},{"location":"triply-etl/transform/typescript/#typescript","text":"The vast majority of ETLs can be written with the core set of RATT Transformations . But sometimes a custom transformation is necessary that cannot be handled by this core set. For such circumstances, TriplyETL allows a custom TypeScript function to be written. Notice that the use of a custom TypeScript function should be somewhat uncommon. The vast majority of real-world transformations should be supported by the core set of RATT Transformations.","title":"TypeScript"},{"location":"triply-etl/transform/typescript/#context","text":"Custom TypeScript functions have access to various resources inside the TriplyETL. 
These resources include, but are not limited to, the full Record and the full Internal Store. TriplyETL refers to these resources as the Context . context.app The TriplyETL pipeline object. context.getX Tetrieves the value of a specific key in the Record and assumes it has type X , e.g. getAny() , getNumber() , getString() . context.record The current Record. context.store The Internal Store.","title":"Context"},{"location":"triply-etl/transform/typescript/#function-customadd","text":"Adds a new entry to the Record, based on more than one existing entry. The value of the entry is the result of an arbitrary TypeScript function that has access to the full Context .","title":"Function custom.add()"},{"location":"triply-etl/transform/typescript/#function-signature","text":"The custom.add function has the following signature: etl.use( custom.add({ value: context => FUNCTION_BODY, key: 'NEW_KEY', }), ) The function can be configured in the following ways: - FUNCTION_BODY the body of a function, taking the Context as its input parameter ( context) and ending with a return statement returning the newly added value. - NEW_KEY must be the name of a new entry in the Record.","title":"Function signature"},{"location":"triply-etl/transform/typescript/#error-conditions","text":"This function emits an error if NEW_KEY already exists in the current Record.","title":"Error conditions"},{"location":"triply-etl/transform/typescript/#see-also","text":"Notice that it is bad practice to use custom.add() for adding a new entry that is based on exactly one existing entry. In such cases, the use of function custom.copy()","title":"See also"},{"location":"triply-etl/transform/typescript/#example-numeric-calculations","text":"Suppose the source data contains a numeric balance and a numeric rate. We can use function custom.add() to calculate the interest and store it in a new key: import { Etl, fromJson } from '@triplyetl/etl/generic' import { custom, logRecord } from '@triplyetl/etl/ratt' export default async function (): Promise { const etl = new Etl() etl.use( fromJson([ { balance: 100, rate: 0.1 }, { balance: 200, rate: 0.2 } ]), custom.add({ value: context => context.getNumber('balance') * context.getNumber('rate'), key: 'interest', }), logRecord(), ) return etl } This prints the following two records: { \"balance\": 100, \"rate\": 0.1, \"interest\": 10 } { \"balance\": 200, \"rate\": 0.2, \"interest\": 40 }","title":"Example: Numeric calculations"},{"location":"triply-etl/transform/typescript/#function-customchange","text":"Changes an existing entry in the Record. The change function takes the old value and returns the new value.","title":"Function custom.change()"},{"location":"triply-etl/transform/typescript/#function-signature_1","text":"This function has the following signature: etl.use( custom.change({ key: 'KEY_NAME', type: 'VALUE_TYPE', change: value => FUNCTION_BODY, }), ) The function can be configured in the following way: - KEY_NAME must be the name of a key in the record. - VALUE_TYPE must be one of the following type-denoting strings: - 'array' an array whose elements have type any . - 'boolean' a Boolean value ( true or false ). - 'iri' a universal identifier / IRI term. - 'literal' an RDF literal term. - 'number' a natural number or floating-point number. - 'string' a sequence of characters. - 'unknown' an unknown type. 
- FUNCTION_BODY a function body that returns the new value.","title":"Function signature"},{"location":"triply-etl/transform/typescript/#error-conditions_1","text":"This function emits an error if the specified key ( KEY_NAME ) does not exist in the RATT record. Use custom.copy() if you want to create a new entry based on an existing one.","title":"Error conditions"},{"location":"triply-etl/transform/typescript/#example-numeric-calculation","text":"Suppose the source data contains a balance in thousands. We can use function custom.change() to multiply the balance inplace: import { Etl, fromJson } from '@triplyetl/etl/generic' import { custom, logRecord } from '@triplyetl/etl/ratt' export default async function (): Promise { const etl = new Etl() etl.use( fromJson([{ balance: 100 }, { balance: 200 }]), custom.change({ change: value => 1_000 * value, type: 'number', key: 'balance', }), logRecord(), ) return etl } This prints the following two records: { \"balance\": 100000 } { \"balance\": 200000 } Notice that the values for the balance keys were changed.","title":"Example: Numeric calculation"},{"location":"triply-etl/transform/typescript/#example-cast-numeric-data","text":"Some source data formats are unable to represent numeric data. A good example are the CSV and TSV formats, where every cell value is represented as a string. If such a source data format that cannot represent numeric data is used, it is often useful to explicitly cast string values to numbers. For example, assume the following input table that uses strings to encode the number of inhabitants for each country: Country Inhabitants France '' Germany '83190556' Italy 'empty' Netherlands '17650200' We can use the custom.change() function to cast the values stored in the 'Inhabitants' key to numbers in the following way: custom.change({ change: value => +(value as number), type: 'unknown', key: 'Inhabitants', }), Notice that the type must be set to 'unknown' because a string is not allowed to be cast to a number in TypeScript (because not every string can be cast to a number). After custom.change() has been applied, the record looks as follows: Country Inhabitants France 0 Germany 83190556 Italy null Netherlands 17650200 Notice that strings that encode a number are correctly transformed, and non-empty strings that do not encode a number are transformed to null . Most of the time, this is the behavior that you want in a linked data pipeline. Also notice that the empty string is cast to the number zero. Most of the time, this is not what you want. If you want to prevent this transformation from happening, and you almost certainly do, you must process the source data conditionally, using control structures .","title":"Example: Cast numeric data"},{"location":"triply-etl/transform/typescript/#example-variant-type","text":"A variant is a value that does not always have the same type. Variants may appear in dirty source data, where a value is sometimes given in one way and sometimes in another. In such cases, the type parameter must be set to 'unknown' . Inside the body of the change function we first cast the value to a variant type. In TypeScript the notation for this is a sequence of types separated by the pipe ( | ) character. Finally, the typeof operator is used to clean the source data to a uniform type that is easier to process in the rest of the ETL. 
The following code snippet processes source data where the balance is sometimes specified as a number and sometimes as a string: import { Etl, fromJson } from '@triplyetl/etl/generic' import { custom, logRecord } from '@triplyetl/etl/ratt' export default async function (): Promise { const etl = new Etl() etl.use( fromJson([{ balance: 100 }, { balance: \"200\" }]), custom.change({ key: 'balance', type: 'unknown', change: value => { const tmp = value as number | string switch (typeof tmp) { case 'number': return value as number case 'string': return parseInt(value as string) } }, }), logRecord(), ) return etl } This prints the following two records, where the balance is now always a number that can be uniformly processed: { \"balance\": 100 } { \"balance\": 200 }","title":"Example: Variant type"},{"location":"triply-etl/transform/typescript/#example-string-or-object","text":"In the following example the name of a person is sometimes given as a plain string and sometimes as an object with a fistName and a lastName key: The following function transforms this variant to a uniform string type: import { Etl, fromJson } from '@triplyetl/etl/generic' import { custom, logRecord } from '@triplyetl/etl/ratt' export default async function (): Promise { const etl = new Etl() etl.use( fromJson([ { name: 'John Doe' }, { name: { firstName: 'Jane', lastName: 'Doe' } } ]), custom.change({ key: 'name', type: 'unknown', change: value => { const tmp = value as { firstName: string, lastName: string } | string switch (typeof tmp) { case 'string': return tmp case 'object': return tmp.firstName + ' ' + tmp.lastName } }, }), logRecord(), ) return etl } This print the following two records that can now be uniformly processed: { \"name\": \"John Doe\" } { \"name\": \"Jane Doe\" }","title":"Example: String or object"},{"location":"triply-etl/transform/typescript/#customreplace","text":"Replaces the value of an existing key based on the value from another key.","title":"custom.replace()"},{"location":"triply-etl/transform/typescript/#function-signature_2","text":"The custom.replace() function has the following signature: etl.use( custom.replace({ fromKey: 'FROM_KEY', type: 'VALUE_TYPE', change?: value => FUNCTION_BODY, toKey: 'FROM_TYPE', }), ) fromKey is the name of the key whose value is going to be used to replace the old value with. type is the name of the type of the value in fromKey . The change key optionally specifies a function that takes the cast value of fromKey and that returns the value that will be stored in toKey . If the change function is not specified, it is identical to value => value . toKey is the name of the existing key whose value is going to be replaced.","title":"Function signature"},{"location":"triply-etl/transform/typescript/#error-conditions_2","text":"This function emits an error under the following conditions: - fromKey does not specify a key name that exists in the current Record. - toKey does not specify a key name that exists in the current Record. 
- fromKey and toKey are the same.","title":"Error conditions"},{"location":"triply-etl/transform/typescript/#see-also_1","text":"If fromKey and toKey are the same, then function custom.change() must be used instead.","title":"See also"},{"location":"triply-etl/transform/xslt/","text":"On this page: XSLT (Extensible Stylesheet Language Transformations) Example Input XML file (books.xml) XSLT Stylesheet (books-to-rdf.xsl) Output RDF (result.rdf) after applying XSLT Using XSLT in TriplyETL XSLT (Extensible Stylesheet Language Transformations) \u00b6 XSLT (Extensible Stylesheet Language Transformations) is a language used to transform and manipulate XML data. With XSLT, you have the capability to create rules and transformations that convert data documents into different formats or structures. Example \u00b6 Here's an example of an XML file, an XSLT stylesheet, and the resulting output in RDF format after applying the XSLT transformation. In this example, we'll transform a simple XML representation of books into RDF triples. Input XML file (books.xml) \u00b6 The Great Gatsby F. Scott Fitzgerald 1984 George Orwell XSLT Stylesheet (books-to-rdf.xsl) \u00b6 Output RDF (result.rdf) after applying XSLT \u00b6 F. Scott Fitzgerald The Great Gatsby George Orwell 1984 Using XSLT in TriplyETL \u00b6 In TriplyETL, XSLT processing is supported in the fromXML() and loadRdf() middlewares by providing an optional Source.file() to the stylesheet parameter that uses an XSL-XML Stylesheet. Below we will explain in steps how it can be used: 1. Create your XSLT stylesheet: First, you need to create an XSLT stylesheet. This stylesheet defines the rules for transforming your XML data. It should have a .xslt or .xsl file extension. You can create this stylesheet using any text editor or XML/XSLT development tool. 2. Load the data and apply XSLT transformation using either fromXml() or loadRdf() : fromXml() is used to load and transform xml to xml with different structure: fromXml(Source.file(xml), { selectors: 'rdf:RDF.sdo:Person', stylesheet: Source.file(stylesheet), }) loadRdf() is used to load and transform xml to xml/rdf to internal store: loadRdf(Source.file(xml), { contentType: 'application/rdf+xml', stylesheet: Source.file(xsl), }),","title":"XSLT"},{"location":"triply-etl/transform/xslt/#xslt-extensible-stylesheet-language-transformations","text":"XSLT (Extensible Stylesheet Language Transformations) is a language used to transform and manipulate XML data. With XSLT, you have the capability to create rules and transformations that convert data documents into different formats or structures.","title":"XSLT (Extensible Stylesheet Language Transformations)"},{"location":"triply-etl/transform/xslt/#example","text":"Here's an example of an XML file, an XSLT stylesheet, and the resulting output in RDF format after applying the XSLT transformation. In this example, we'll transform a simple XML representation of books into RDF triples.","title":"Example"},{"location":"triply-etl/transform/xslt/#input-xml-file-booksxml","text":" The Great Gatsby F. Scott Fitzgerald 1984 George Orwell ","title":"Input XML file (books.xml)"},{"location":"triply-etl/transform/xslt/#xslt-stylesheet-books-to-rdfxsl","text":" ","title":"XSLT Stylesheet (books-to-rdf.xsl)"},{"location":"triply-etl/transform/xslt/#output-rdf-resultrdf-after-applying-xslt","text":" F. 
Scott Fitzgerald The Great Gatsby George Orwell 1984 ","title":"Output RDF (result.rdf) after applying XSLT"},{"location":"triply-etl/transform/xslt/#using-xslt-in-triplyetl","text":"In TriplyETL, XSLT processing is supported in the fromXML() and loadRdf() middlewares by providing an optional Source.file() to the stylesheet parameter that uses an XSL-XML Stylesheet. Below we will explain in steps how it can be used: 1. Create your XSLT stylesheet: First, you need to create an XSLT stylesheet. This stylesheet defines the rules for transforming your XML data. It should have a .xslt or .xsl file extension. You can create this stylesheet using any text editor or XML/XSLT development tool. 2. Load the data and apply XSLT transformation using either fromXml() or loadRdf() : fromXml() is used to load and transform xml to xml with different structure: fromXml(Source.file(xml), { selectors: 'rdf:RDF.sdo:Person', stylesheet: Source.file(stylesheet), }) loadRdf() is used to load and transform xml to xml/rdf to internal store: loadRdf(Source.file(xml), { contentType: 'application/rdf+xml', stylesheet: Source.file(xsl), }),","title":"Using XSLT in TriplyETL"},{"location":"triply-etl/validate/","text":"On this page: Validate Validate \u00b6 The Validate step ensures that the linked data a pipeline produces conforms to the requirements specified in the data model. Every ETL should include the Validate step to ensure that only valid data is published in knowledge graphs. graph LR sources -- 1. Extract --> record record -- 2. Transform --> record record -- 3. Assert --> ld ld -- 4. Enrich --> ld ld -- 5. Validate --> ld ld -- 6. Publish --> destinations linkStyle 4 stroke:red,stroke-width:3px; destinations[(\"D. Destinations\\n(TriplyDB)\")] ld[C. Internal Store] record[B. Record] sources[A. Data Sources] TriplyETL supports the following two approaches for validating linked data: Graph Comparison uses one or more manually created 'gold records'. Graph comparison ensures that these records are transformed in the intended way by the ETL pipeline. SHACL Validation uses a generic data model. SHACL Validation ensures that each individual record is processed in accordance with the generic data model. Notice that it is possible to combine these two approaches in the same ETL pipeline: you can use graph comparison to test for specific conformities, and use SHACL to test for generic conformities.","title":"Overview"},{"location":"triply-etl/validate/#validate","text":"The Validate step ensures that the linked data a pipeline produces conforms to the requirements specified in the data model. Every ETL should include the Validate step to ensure that only valid data is published in knowledge graphs. graph LR sources -- 1. Extract --> record record -- 2. Transform --> record record -- 3. Assert --> ld ld -- 4. Enrich --> ld ld -- 5. Validate --> ld ld -- 6. Publish --> destinations linkStyle 4 stroke:red,stroke-width:3px; destinations[(\"D. Destinations\\n(TriplyDB)\")] ld[C. Internal Store] record[B. Record] sources[A. Data Sources] TriplyETL supports the following two approaches for validating linked data: Graph Comparison uses one or more manually created 'gold records'. Graph comparison ensures that these records are transformed in the intended way by the ETL pipeline. SHACL Validation uses a generic data model. SHACL Validation ensures that each individual record is processed in accordance with the generic data model. 
Notice that it is possible to combine these two approaches in the same ETL pipeline: you can use graph comparison to test for specific conformities, and use SHACL to test for generic conformities.","title":"Validate"},{"location":"triply-etl/validate/graph-comparison/","text":"On this page: Graph Comparison Graph comparison failure Graph comparison success Graph comparison for validation Step 1: Identify representative records Step 2: Manually create linked data Step 3: Implement the ETL Step 4. Call the graph comparison function Full script Options See also Graph Comparison \u00b6 Graph comparison is an approach for validating that the data produced by TriplyETL is identical to one or more manually specified graphs. Graph comparison failure \u00b6 The following full TriplyETL script shows how graph comparison failure is detected in TriplyETL: import { compareGraphs, Etl, loadRdf, Source } from '@triplyetl/etl/generic' export default async function (): Promise { const etl = new Etl() etl.use( loadRdf(Source.string(`[]

.`)), compareGraphs(Source.string(`[]

. []

.`)), ) return etl } Function loadRdf() loads the following linked data into the Internal Store: graph LR _:1 -- p --> o. Function compareGraphs() compares the contents of the Internal Store to the following linked data: graph LR _:2a -- p --> o. _:2b -- p --> o. Notice that these two graphs are not isomorphic: It is possible to map _:2a and _:2b onto _:1 , but it is not possible to map _:1 onto both _:2a and _:2b . As a result, graph comparison will fail and the ETL will be halted. Graph comparison success \u00b6 The following full TriplyETL script shows how graph comparison success is detected in TriplyETL: import { compareGraphs, Etl, loadRdf, Source } from '@triplyetl/etl/generic' export default async function (): Promise { const etl = new Etl() etl.use( loadRdf(Source.string(`

.`)), compareGraphs(Source.string(`

.

.`)), ) return etl } This example is similar to the previous example, but uses names for the subject terms ( ). Because of this, graph comparison now succeeds. Graph comparison for validation \u00b6 The two examples that were given before illustrate how graph comparison works. But they do not show how graph comparison can be used as a validation approach in TriplyETL pipelines. In order to do so, the following steps must be followed: Identify representative records. Select a limited number of records from your data sources that together are representative for the full data source systems. This often includes typical records, where all/most expected data items are included, as well as atypical records that are uncommon but valid. There must be reasonable confidence that an ETL that produces the correct results for these selected records, will also produce correct results for all other records. Create target linked data. For the representative records identified in step 1, manually create the linked data that the ETL should generate for them. The manually created linked data must be included in the ETL repository, for example by including various TriG files. Implement the ETL. The ETL configuration must be implemented in a generic way that works for all records. Any of the supported ETL configuration languages can be used for this: JSON-LD, RATT, RML, SHACL, SPARQL, or XSLT. Call the graph comparison function. After the ETL has been performed for a specific record, call the graph comparison function to validate whether the linked data in the Internal Store is isomorphic to the linked data that was manually specified. Step 1: Identify representative records \u00b6 To keep our example simple, we use a data source with three simple JSON records: fromJson([ { id: '1', price: 15 }, { id: '2', price: 12 }, { color: 'red', id: '3', price: 16 }, ]), We choose records 1 and 3 as the representative ones. Together these two records use all properties that occur in the source data. These records also allow us to test whether the optional color property is handled correctly. Step 2: Manually create linked data \u00b6 For each record selected in Step 1, we create the linked data that must be generated by TriplyETL: prefix sdo: [] sdo:value 15. and: prefix sdo: [] sdo:color 'red'; sdo:value 16. Step 3: Implement the ETL \u00b6 We use RATT to implement the assertions: addSkolemIri({ key: '_product' }), triple('_product', sdo.price, 'price'), when('width', triple('_product', sdo.color, 'color')), Step 4. Call the graph comparison function \u00b6 Since comparison graphs must be created by hand, only a small number of records will have a corresponding comparison graph. The graph comparison call must therefore only be performed for some records. We use the switch control structure to determine whether a record is eligible for graph comparison. 
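Taken by itself, the conditional comparison from Step 4 is just the following fragment; it reappears unchanged in the full script below. The prefix declaration is a sketch that assumes the sdo prefix expands to https://schema.org/, matching the sdo import from '@triplyetl/vocabularies'. _switch('id', // Record 1: compare against the manually created graph for a product without a color. ['1', compareGraphs(Source.string(` prefix sdo: <https://schema.org/> [] sdo:value 15.`))], // Record 3: compare against the manually created graph for a product with a color. ['3', compareGraphs(Source.string(` prefix sdo: <https://schema.org/> [] sdo:color 'red'; sdo:value 16.`))], ), 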
Full script \u00b6 By completing the 4 steps, we end up with the following full TriplyETL script that applies graph comparison to a limited number of representative records: import { _switch, compareGraphs, Etl, fromJson, Source, when } from '@triplyetl/etl/generic' import { addSkolemIri, triple } from '@triplyetl/etl/ratt' import { sdo } from '@triplyetl/vocabularies' export default async function (): Promise { const etl = new Etl() etl.use( fromJson([ { id: '1', price: 15 }, { id: '2', price: 12 }, { color: 'red', id: '3', price: 16 }, ]), addSkolemIri({ key: '_product' }), triple('_product', sdo.price, 'price'), when('width', triple('_product', sdo.color, 'color')), _switch('id', ['1', compareGraphs(Source.string(` prefix sdo: [] sdo:value 15.`))], ['3', compareGraphs(Source.string(` prefix sdo: [] sdo:color 'red'; sdo:value 16.`))], ), ) return etl } This script succeeds, since the linked data generated by the ETL is isomorphic to the manually specified data. Options \u00b6 contentType specifies the RDF format in which the comparison graphs are serialized. The following values are supported: \"application/json\" \"application/ld+json\" \"application/n-quads\" \"application/n-triples\" \"application/rdf+xml\" \"application/trig\" \"text/html\" \"text/n3\"; \"text/turtle\" defaultGraph specifies the named graph in which the comparison graph must be loaded. This is only useful if the chosen RDF serialization format cannot express named graphs. key unknown See also \u00b6 Graph comparison makes use of graph isomorphism, which is part of the RDF 1.1 Semantics standard ( external link ).","title":"Graph Comparison"},{"location":"triply-etl/validate/graph-comparison/#graph-comparison","text":"Graph comparison is an approach for validating that the data produced by TriplyETL is identical to one or more manually specified graphs.","title":"Graph Comparison"},{"location":"triply-etl/validate/graph-comparison/#graph-comparison-failure","text":"The following full TriplyETL script shows how graph comparison failure is detected in TriplyETL: import { compareGraphs, Etl, loadRdf, Source } from '@triplyetl/etl/generic' export default async function (): Promise { const etl = new Etl() etl.use( loadRdf(Source.string(`[]

.`)), compareGraphs(Source.string(`[]

. []

.`)), ) return etl } Function loadRdf() loads the following linked data into the Internal Store: graph LR _:1 -- p --> o. Function compareGraphs() compares the contents of the Internal Store to the following linked data: graph LR _:2a -- p --> o. _:2b -- p --> o. Notice that these two graphs are not isomorphic: It is possible to map _:2a and _:2b onto _:1 , but it is not possible to map _:1 onto both _:2a and _:2b . As a result, graph comparison will fail and the ETL will be halted.","title":"Graph comparison failure"},{"location":"triply-etl/validate/graph-comparison/#graph-comparison-success","text":"The following full TriplyETL script shows how graph comparison success is detected in TriplyETL: import { compareGraphs, Etl, loadRdf, Source } from '@triplyetl/etl/generic' export default async function (): Promise { const etl = new Etl() etl.use( loadRdf(Source.string(`

.`)), compareGraphs(Source.string(`

.

.`)), ) return etl } This example is similar to the previous example, but uses names for the subject terms ( ). Because of this, graph comparison now succeeds.","title":"Graph comparison success"},{"location":"triply-etl/validate/graph-comparison/#graph-comparison-for-validation","text":"The two examples that were given before illustrate how graph comparison works. But they do not show how graph comparison can be used as a validation approach in TriplyETL pipelines. In order to do so, the following steps must be followed: Identify representative records. Select a limited number of records from your data sources that together are representative for the full data source systems. This often includes typical records, where all/most expected data items are included, as well as atypical records that are uncommon but valid. There must be reasonable confidence that an ETL that produces the correct results for these selected records, will also produce correct results for all other records. Create target linked data. For the representative records identified in step 1, manually create the linked data that the ETL should generate for them. The manually created linked data must be included in the ETL repository, for example by including various TriG files. Implement the ETL. The ETL configuration must be implemented in a generic way that works for all records. Any of the supported ETL configuration languages can be used for this: JSON-LD, RATT, RML, SHACL, SPARQL, or XSLT. Call the graph comparison function. After the ETL has been performed for a specific record, call the graph comparison function to validate whether the linked data in the Internal Store is isomorphic to the linked data that was manually specified.","title":"Graph comparison for validation"},{"location":"triply-etl/validate/graph-comparison/#step-1-identify-representative-records","text":"To keep our example simple, we use a data source with three simple JSON records: fromJson([ { id: '1', price: 15 }, { id: '2', price: 12 }, { color: 'red', id: '3', price: 16 }, ]), We choose records 1 and 3 as the representative ones. Together these two records use all properties that occur in the source data. These records also allow us to test whether the optional color property is handled correctly.","title":"Step 1: Identify representative records"},{"location":"triply-etl/validate/graph-comparison/#step-2-manually-create-linked-data","text":"For each record selected in Step 1, we create the linked data that must be generated by TriplyETL: prefix sdo: [] sdo:value 15. and: prefix sdo: [] sdo:color 'red'; sdo:value 16.","title":"Step 2: Manually create linked data"},{"location":"triply-etl/validate/graph-comparison/#step-3-implement-the-etl","text":"We use RATT to implement the assertions: addSkolemIri({ key: '_product' }), triple('_product', sdo.price, 'price'), when('width', triple('_product', sdo.color, 'color')),","title":"Step 3: Implement the ETL"},{"location":"triply-etl/validate/graph-comparison/#step-4-call-the-graph-comparison-function","text":"Since comparison graphs must be created by hand, only a small number of records will have a corresponding comparison graph. The graph comparison call must therefore only be performed for some records. We use the switch control structure to determine whether a record is eligible for graph comparison.","title":"Step 4. 
Call the graph comparison function"},{"location":"triply-etl/validate/graph-comparison/#full-script","text":"By completing the 4 steps, we end up with the following full TriplyETL script that applies graph comparison to a limited number of representative records: import { _switch, compareGraphs, Etl, fromJson, Source, when } from '@triplyetl/etl/generic' import { addSkolemIri, triple } from '@triplyetl/etl/ratt' import { sdo } from '@triplyetl/vocabularies' export default async function (): Promise { const etl = new Etl() etl.use( fromJson([ { id: '1', price: 15 }, { id: '2', price: 12 }, { color: 'red', id: '3', price: 16 }, ]), addSkolemIri({ key: '_product' }), triple('_product', sdo.price, 'price'), when('width', triple('_product', sdo.color, 'color')), _switch('id', ['1', compareGraphs(Source.string(` prefix sdo: [] sdo:value 15.`))], ['3', compareGraphs(Source.string(` prefix sdo: [] sdo:color 'red'; sdo:value 16.`))], ), ) return etl } This script succeeds, since the linked data generated by the ETL is isomorphic to the manually specified data.","title":"Full script"},{"location":"triply-etl/validate/graph-comparison/#options","text":"contentType specifies the RDF format in which the comparison graphs are serialized. The following values are supported: \"application/json\" \"application/ld+json\" \"application/n-quads\" \"application/n-triples\" \"application/rdf+xml\" \"application/trig\" \"text/html\" \"text/n3\"; \"text/turtle\" defaultGraph specifies the named graph in which the comparison graph must be loaded. This is only useful if the chosen RDF serialization format cannot express named graphs. key unknown","title":"Options"},{"location":"triply-etl/validate/graph-comparison/#see-also","text":"Graph comparison makes use of graph isomorphism, which is part of the RDF 1.1 Semantics standard ( external link ).","title":"See also"},{"location":"triply-etl/validate/shacl/","text":"On this page: SHACL Validation Prerequisites A complete example Step 1: Source data Step 2: Target data (informal) Step 3: Information Model (informal) Step 4: Transformation Step 5: Information Model (formal) Step 6: Use the validate() function Step 7: Fix the validation error Option 1: Change the source data Option 2: Change the transformation and/or assertions Option 3: Change the Information Model Reflections on which option to choose SHACL Validation \u00b6 This page documents how SHACL is used to validate linked data in the internal store of your ETL pipeline. Prerequisites \u00b6 SHACL Validation can be used when the following preconditions are met: A data model that uses SHACL constraints. Some data must be asserted in the internal store. If your internal store is still empty, you can read the Assert documentation on how to add assertions to that store. The function for running SHACL Validation is imported as follows: import { validate } from '@triplyetl/etl/shacl' A complete example \u00b6 We use the following full TriplyETL script to explain the validation feature. Do not worry about the length of the script; we will go through each part step-by-step. 
import { Etl, Source, declarePrefix, fromJson, toTriplyDb } from '@triplyetl/etl/generic' import { iri, pairs } from '@triplyetl/etl/ratt' import { validate } from '@triplyetl/etl/shacl' import { a, foaf } from '@triplyetl/vocabularies' const prefix = { id: declarePrefix('https://triplydb.com/Triply/example/id/'), } export default async function (): Promise { const etl = new Etl() etl.use( fromJson([{ age: 'twelve', id: '1' }]), pairs(iri(prefix.id, 'id'), [a, foaf.Person], [foaf.age, 'age'], ), validate(Source.string(` prefix foaf: prefix rdf: prefix sh: prefix shp: prefix xsd: shp:Person a sh:NodeShape; sh:closed true; sh:ignoredProperties ( rdf:type ); sh:property shp:Person_age; sh:targetClass foaf:Person. shp:Person_age a sh:PropertyShape; sh:datatype xsd:nonNegativeInteger; sh:maxCount 1; sh:minCount 1; sh:path foaf:age.` )), toTriplyDb({ dataset: 'test' }), ) return etl } Step 1: Source data \u00b6 In our example we are using the following source data that records the age of a person: { \"age\": \"twelve\", \"id\": \"id\" } In our example the data source is inline JSON , but notice that any source format could have been used: fromJson([{ age: 'twelve', id: '1' }]), Step 2: Target data (informal) \u00b6 Based on the source data in Step 1, we want to publish the following linked data in TriplyDB: id:123 a foaf:Person; foaf:age 'twelve'. Step 3: Information Model (informal) \u00b6 Our intended target data in Step 2 looks ok at first glance. But we want to specify the requirements for our data in generic terms. Such a specification is called an Information Model . An Information Model is a generic specification of the requirements for our data. It is common to illustrate an Information Model with a picture: classDiagram class foaf_Person { foaf_age: xsd_nonNegativeInteger [1..1] } This Information Model specifies that instances of class foaf:Person must have exactly one value for the foaf:age property. Values for this property must have datatype xsd:nonNegativeInteger . Step 4: Transformation \u00b6 We now have source data (Step 1), and a fair intuition about our target data (Step 2), and an Information Model (Step 3). We can automate the mapping from source to target data with an Assertion : etl.use( fromJson([{ age: 'twelve', id: '1' }]), pairs(iri(prefix.id, 'id'), [a, foaf.Person], [foaf.age, 'age'], ), ) That looks about right: we create instances of class foaf:Person and triples that assert a foaf:age property for each such person. However, a linked data expert may notice that the value 'twelve' from the source data will not be transformed into a non-negative integer ( xsd:nonNegativeInteger ). Indeed, our 'age' assertion will create a literal with datatype xsd:string . Oops, that violates the Information Model! How can we automate such checks? The above example is relatively simple, so a linked data expert may notice the error and fix it. But what happens when the ETL configuration is hundreds of lines long and is spread across multiple files? What happens when there is a large number of classes, and each class has a large number of properties? What if some of the properties are required, while others are optional? Etc. Obviously, any real-world ETL will quickly become too complex to validate by hand. For this reason, TriplyETL provides automated validation. Triply considers having an automated validation step best practice for any ETL. This is the case even for small and simple ETLs, since they tend to grow into complex ones some day. 
Step 5: Information Model (formal) \u00b6 The linked data ecosystem includes the SHACL standard for encoding Information Models. SHACL allows us to formally express the picture from Step 3. The model is itself expressed in linked data: prefix foaf: prefix rdf: prefix sh: prefix shp: prefix xsd: shp:Person a sh:NodeShape; sh:closed true; sh:ignoredProperties ( rdf:type ); sh:property shp:Person_age; sh:targetClass foaf:Person. shp:Person_age a sh:PropertyShape; sh:datatype xsd:nonNegativeInteger; sh:maxCount 1; sh:minCount 1; sh:path foaf:age. Notice the following details: We enforce a Closed World Semantics (CWA) in our Information Models with the sh:closed property. If a property is not explicitly specified in our Information Model, it is not allowed to be used with instance data. We create IRIs in the dedicated shp: namespace for nodes in the Information Model. Elements in our Information Model are always in a one-to-one correspondence with elements in our Knowledge Model: Node shapes such as shp:Person relate to a specific class such as foaf:Person . Property shapes such as shp:Person_age relate to a specific property such as foaf:age . Step 6: Use the validate() function \u00b6 TriplyETL has a dedicated function that can be used to automatically enforce Information Models such as the one expressed in Step 5. Since the Information Model is relatively small, it can be specified in-line using the string source type . Larger models will probably be stored in a separate file or in a TriplyDB graph or asset. validate(Source.string(` prefix foaf: prefix rdf: prefix sh: prefix shp: prefix xsd: shp:Person a sh:NodeShape; sh:closed true; sh:ignoredProperties ( rdf:type ); sh:property shp:Person_age; sh:targetClass foaf:Person. shp:Person_age a sh:PropertyShape; sh:datatype xsd:nonNegativeInteger; sh:maxCount 1; sh:minCount 1; sh:path foaf:age.` )), When we run the validate() function at the end of our ETL script, we will receive the following error: ERROR (Record #1) SHACL Violation on node id:1 for path foaf:age, source shape shp:Person_age: 1. Value does not have datatype xsd:nonNegativeInteger Oops! The value for the foaf:age property has an incorrect datatype. This is indeed the automated check and feedback that we want. Notice that the requirement that was violated ( shp:Person_age ) is mentioned in the notification. If we want to learn more, we can look up this node in our Information Model. If we want to take a look at a concrete example in our instance data, we can also take look at node id:1 which is also mentioned in the notification. If we want to save our validation report to a a local file or upload it to TriplyDB, we can do that by specifying the Destination inside the validate() function. For example, the snippet below uses a file called model.trig as the Information Model and stores the report to another file, called report.ttl . validate(Source.file('static/model.trig'), {report: { destination: Destination.file(\"report.ttl\")}}) If we want to upload the report to TriplyDB, we can do this like in the example below. Note that adding the IRI of the validation graph under graph is optional, but it's useful to add it to avoid accidentally overwriting your current dataset. validate(Source.file('static/model.trig'), { report: { destination: Destination.triplyDb({ account: 'my-account', dataset: 'my-dataset', }), graph: 'https://example.org' } }) Step 7: Fix the validation error \u00b6 Now that we receive the automated validation error in Step 6, we can look for ways to fix our ETL. 
Let us take one more look at our current assertions: etl.run( fromJson([{ age: 'twelve', id: '1' }]), pairs(iri(prefix.id, 'id'), [a, foaf.Person], [foaf.age, 'age'], ), ) We could change the term assertion for the value of foaf:age to explicitly make use of the xsd:nonNegativeInteger datatype: literal('age', xsd.nonNegativeInteger) But that would not work in TriplyETL: the Triply software (luckily!) does not allow us to create incorrect linked data. Since the following literal would be incorrect, TriplyETL does not even allow us to assert it: 'twelve'^^xsd:nonNegativeInteger Well, it is nice that TriplyETL does not allow us to create incorrect data. But how can we fix the issue at hand? How can we create linked data that follows our Information Model? As in any ETL error, there are 3 possible solutions: Change the data in the source system. Change the ETL transformations and/or assertions. Change the Information Model. Option 1: Change the source data \u00b6 In this case, changing the data in the source system seem the most logical. After all, there may be multiple ways in which the age of a person can be described using one or more English words. Expressing ages numerically is a good idea in general, since it will make the source data easier to interpret. Option 2: Change the transformation and/or assertions \u00b6 Alternatively, it is possible to transform English words that denote numbers to their corresponding numeric values. Since people can get up to one hundred years old, or even older, there are many words that we must consider and transform. This can be done with the translateAll() transformation : translateAll({ content: 'age', table: { 'one': 1, ... 'twelve': 12, ..., 'one hundred': 100, ..., }, key: '_age', }), pairs(iri(prefix.id, 'id'), [a, foaf.Person], [foaf.age, literal('_age', xsd.nonNegativeInteger)], ), But even the above transformation may not suffice. The same number can be expressed in multiple ways in natural language, so the mapping will never be truly complete and reliable. This seems to be the worst of the three options in this case. Option 3: Change the Information Model \u00b6 Finally, we could loosen the Information Model. For example, we could change the datatype to check for strings: shp:Person_age sh:datatype xsd:string. But that would invalidate ETLs that generate numeric ages for persons, even though that seems perfectly fine, if not better than generating strings. Also, this would allow literals like 'abc' to pass validation as a legal value for foaf:age . Alternatively, we can remove the sh:datatype requirement from our Information Model entirely. That would allow either string-based ages or numeric ages to be specified. But now even weirder values for age, e.g. '2023-01-01'^^xsd:date , would be considered valid values for age. Reflections on which option to choose \u00b6 Notice that TriplyETL does not tell you which of the 3 options you should follow in order to fix issues in your ETL. After all, creating an ETL requires domain knowledge based on which you weight the pros and const of different options. However, TriplyETL does give you the tools to discover issues that prompt you to come up with such solutions. 
And once you have decided on a specific solution, TriplyETL provides you with the tools to implement it.","title":"SHACL"},{"location":"triply-etl/validate/shacl/#shacl-validation","text":"This page documents how SHACL is used to validate linked data in the internal store of your ETL pipeline.","title":"SHACL Validation"},{"location":"triply-etl/validate/shacl/#prerequisites","text":"SHACL Validation can be used when the following preconditions are met: A data model that uses SHACL constraints. Some data must be asserted in the internal store. If your internal store is still empty, you can read the Assert documentation on how to add assertions to that store. The function for running SHACL Validation is imported as follows: import { validate } from '@triplyetl/etl/shacl'","title":"Prerequisites"},{"location":"triply-etl/validate/shacl/#a-complete-example","text":"We use the following full TriplyETL script to explain the validation feature. Do not worry about the length of the script; we will go through each part step-by-step. import { Etl, Source, declarePrefix, fromJson, toTriplyDb } from '@triplyetl/etl/generic' import { iri, pairs } from '@triplyetl/etl/ratt' import { validate } from '@triplyetl/etl/shacl' import { a, foaf } from '@triplyetl/vocabularies' const prefix = { id: declarePrefix('https://triplydb.com/Triply/example/id/'), } export default async function (): Promise { const etl = new Etl() etl.use( fromJson([{ age: 'twelve', id: '1' }]), pairs(iri(prefix.id, 'id'), [a, foaf.Person], [foaf.age, 'age'], ), validate(Source.string(` prefix foaf: prefix rdf: prefix sh: prefix shp: prefix xsd: shp:Person a sh:NodeShape; sh:closed true; sh:ignoredProperties ( rdf:type ); sh:property shp:Person_age; sh:targetClass foaf:Person. shp:Person_age a sh:PropertyShape; sh:datatype xsd:nonNegativeInteger; sh:maxCount 1; sh:minCount 1; sh:path foaf:age.` )), toTriplyDb({ dataset: 'test' }), ) return etl }","title":"A complete example"},{"location":"triply-etl/validate/shacl/#step-1-source-data","text":"In our example we are using the following source data that records the age of a person: { \"age\": \"twelve\", \"id\": \"id\" } In our example the data source is inline JSON , but notice that any source format could have been used: fromJson([{ age: 'twelve', id: '1' }]),","title":"Step 1: Source data"},{"location":"triply-etl/validate/shacl/#step-2-target-data-informal","text":"Based on the source data in Step 1, we want to publish the following linked data in TriplyDB: id:123 a foaf:Person; foaf:age 'twelve'.","title":"Step 2: Target data (informal)"},{"location":"triply-etl/validate/shacl/#step-3-information-model-informal","text":"Our intended target data in Step 2 looks ok at first glance. But we want to specify the requirements for our data in generic terms. Such a specification is called an Information Model . An Information Model is a generic specification of the requirements for our data. It is common to illustrate an Information Model with a picture: classDiagram class foaf_Person { foaf_age: xsd_nonNegativeInteger [1..1] } This Information Model specifies that instances of class foaf:Person must have exactly one value for the foaf:age property. Values for this property must have datatype xsd:nonNegativeInteger .","title":"Step 3: Information Model (informal)"},{"location":"triply-etl/validate/shacl/#step-4-transformation","text":"We now have source data (Step 1), and a fair intuition about our target data (Step 2), and an Information Model (Step 3). 
We can automate the mapping from source to target data with an Assertion : etl.use( fromJson([{ age: 'twelve', id: '1' }]), pairs(iri(prefix.id, 'id'), [a, foaf.Person], [foaf.age, 'age'], ), ) That looks about right: we create instances of class foaf:Person and triples that assert a foaf:age property for each such person. However, a linked data expert may notice that the value 'twelve' from the source data will not be transformed into a non-negative integer ( xsd:nonNegativeInteger ). Indeed, our 'age' assertion will create a literal with datatype xsd:string . Oops, that violates the Information Model! How can we automate such checks? The above example is relatively simple, so a linked data expert may notice the error and fix it. But what happens when the ETL configuration is hundreds of lines long and is spread across multiple files? What happens when there is a large number of classes, and each class has a large number of properties? What if some of the properties are required, while others are optional? Etc. Obviously, any real-world ETL will quickly become too complex to validate by hand. For this reason, TriplyETL provides automated validation. Triply considers having an automated validation step best practice for any ETL. This is the case even for small and simple ETLs, since they tend to grow into complex ones some day.","title":"Step 4: Transformation"},{"location":"triply-etl/validate/shacl/#step-5-information-model-formal","text":"The linked data ecosystem includes the SHACL standard for encoding Information Models. SHACL allows us to formally express the picture from Step 3. The model is itself expressed in linked data: prefix foaf: prefix rdf: prefix sh: prefix shp: prefix xsd: shp:Person a sh:NodeShape; sh:closed true; sh:ignoredProperties ( rdf:type ); sh:property shp:Person_age; sh:targetClass foaf:Person. shp:Person_age a sh:PropertyShape; sh:datatype xsd:nonNegativeInteger; sh:maxCount 1; sh:minCount 1; sh:path foaf:age. Notice the following details: We enforce a Closed World Semantics (CWA) in our Information Models with the sh:closed property. If a property is not explicitly specified in our Information Model, it is not allowed to be used with instance data. We create IRIs in the dedicated shp: namespace for nodes in the Information Model. Elements in our Information Model are always in a one-to-one correspondence with elements in our Knowledge Model: Node shapes such as shp:Person relate to a specific class such as foaf:Person . Property shapes such as shp:Person_age relate to a specific property such as foaf:age .","title":"Step 5: Information Model (formal)"},{"location":"triply-etl/validate/shacl/#step-6-use-the-validate-function","text":"TriplyETL has a dedicated function that can be used to automatically enforce Information Models such as the one expressed in Step 5. Since the Information Model is relatively small, it can be specified in-line using the string source type . Larger models will probably be stored in a separate file or in a TriplyDB graph or asset. validate(Source.string(` prefix foaf: prefix rdf: prefix sh: prefix shp: prefix xsd: shp:Person a sh:NodeShape; sh:closed true; sh:ignoredProperties ( rdf:type ); sh:property shp:Person_age; sh:targetClass foaf:Person. 
shp:Person_age a sh:PropertyShape; sh:datatype xsd:nonNegativeInteger; sh:maxCount 1; sh:minCount 1; sh:path foaf:age.` )), When we run the validate() function at the end of our ETL script, we will receive the following error: ERROR (Record #1) SHACL Violation on node id:1 for path foaf:age, source shape shp:Person_age: 1. Value does not have datatype xsd:nonNegativeInteger Oops! The value for the foaf:age property has an incorrect datatype. This is indeed the automated check and feedback that we want. Notice that the requirement that was violated ( shp:Person_age ) is mentioned in the notification. If we want to learn more, we can look up this node in our Information Model. If we want to take a look at a concrete example in our instance data, we can also take look at node id:1 which is also mentioned in the notification. If we want to save our validation report to a a local file or upload it to TriplyDB, we can do that by specifying the Destination inside the validate() function. For example, the snippet below uses a file called model.trig as the Information Model and stores the report to another file, called report.ttl . validate(Source.file('static/model.trig'), {report: { destination: Destination.file(\"report.ttl\")}}) If we want to upload the report to TriplyDB, we can do this like in the example below. Note that adding the IRI of the validation graph under graph is optional, but it's useful to add it to avoid accidentally overwriting your current dataset. validate(Source.file('static/model.trig'), { report: { destination: Destination.triplyDb({ account: 'my-account', dataset: 'my-dataset', }), graph: 'https://example.org' } })","title":"Step 6: Use the validate() function"},{"location":"triply-etl/validate/shacl/#step-7-fix-the-validation-error","text":"Now that we receive the automated validation error in Step 6, we can look for ways to fix our ETL. Let us take one more look at our current assertions: etl.run( fromJson([{ age: 'twelve', id: '1' }]), pairs(iri(prefix.id, 'id'), [a, foaf.Person], [foaf.age, 'age'], ), ) We could change the term assertion for the value of foaf:age to explicitly make use of the xsd:nonNegativeInteger datatype: literal('age', xsd.nonNegativeInteger) But that would not work in TriplyETL: the Triply software (luckily!) does not allow us to create incorrect linked data. Since the following literal would be incorrect, TriplyETL does not even allow us to assert it: 'twelve'^^xsd:nonNegativeInteger Well, it is nice that TriplyETL does not allow us to create incorrect data. But how can we fix the issue at hand? How can we create linked data that follows our Information Model? As in any ETL error, there are 3 possible solutions: Change the data in the source system. Change the ETL transformations and/or assertions. Change the Information Model.","title":"Step 7: Fix the validation error"},{"location":"triply-etl/validate/shacl/#option-1-change-the-source-data","text":"In this case, changing the data in the source system seem the most logical. After all, there may be multiple ways in which the age of a person can be described using one or more English words. Expressing ages numerically is a good idea in general, since it will make the source data easier to interpret.","title":"Option 1: Change the source data"},{"location":"triply-etl/validate/shacl/#option-2-change-the-transformation-andor-assertions","text":"Alternatively, it is possible to transform English words that denote numbers to their corresponding numeric values. 
Since people can get up to one hundred years old, or even older, there are many words that we must consider and transform. This can be done with the translateAll() transformation : translateAll({ content: 'age', table: { 'one': 1, ... 'twelve': 12, ..., 'one hundred': 100, ..., }, key: '_age', }), pairs(iri(prefix.id, 'id'), [a, foaf.Person], [foaf.age, literal('_age', xsd.nonNegativeInteger)], ), But even the above transformation may not suffice. The same number can be expressed in multiple ways in natural language, so the mapping will never be truly complete and reliable. This seems to be the worst of the three options in this case.","title":"Option 2: Change the transformation and/or assertions"},{"location":"triply-etl/validate/shacl/#option-3-change-the-information-model","text":"Finally, we could loosen the Information Model. For example, we could change the datatype to check for strings: shp:Person_age sh:datatype xsd:string. But that would invalidate ETLs that generate numeric ages for persons, even though that seems perfectly fine, if not better than generating strings. Also, this would allow literals like 'abc' to pass validation as a legal value for foaf:age . Alternatively, we can remove the sh:datatype requirement from our Information Model entirely. That would allow either string-based ages or numeric ages to be specified. But now even weirder values for age, e.g. '2023-01-01'^^xsd:date , would be considered valid values for age.","title":"Option 3: Change the Information Model"},{"location":"triply-etl/validate/shacl/#reflections-on-which-option-to-choose","text":"Notice that TriplyETL does not tell you which of the 3 options you should follow in order to fix issues in your ETL. After all, creating an ETL requires domain knowledge based on which you weight the pros and const of different options. However, TriplyETL does give you the tools to discover issues that prompt you to come up with such solutions. 
And once you have decided on a specific solution, TriplyETL provides you with the tools to implement it.","title":"Reflections on which option to choose"},{"location":"triplydb-changelog/","text":"On this page: 24.12.2 24.12.1 24.11.2 24.11.1 24.10.2 24.10.1 24.09.2 24.09.1 24.08.2 24.08.1 24.08.0 24.07.1 24.07.0 24.06.1 24.06.0 24.05.1 24.05.0 24.04.1 24.04.0 24.03.1 24.03.0 24.02.2 24.02.1 24.02.0 24.01.0 23.12.1 23.12.0 23.11.1 23.11.0 24.12.2 \u00b6 Release date: 2024-12-18 Issues fixed #10006 Cancelling a linked data upload may result in an error #9711 [SaaS] Incidental requests are dropped 24.12.1 \u00b6 Release date: 2024-12-05 Issues fixed #9888 [Query job] Query job for very simple construct query may fail #9959 [SPARQL IDE] SPARQL IDE may show same autocomplete suggestion multiple times #9954 [Query job] Query jobs may get sometimes inadvertently cancelled #9927 [Stories] Stories with many queries are slow to load in the browser 24.11.2 \u00b6 Release date: 2024-11-22 Issues fixed #9017 [Speedy] Some queries take too long when reordering/optimizing a query #9780 [SaaS] Enable OCSP stapling for TLS 24.11.1 \u00b6 Release date: 2024-11-08 Issues fixed #8878 Include correct dataset modification date for JSON schema descriptions #9694 Speedy SPARQL endpoints are not included in the dataset NDE descriptions #9807 Parsing of XML content assigns an incorrect datatype #9752 Parsing of XML is incorrect for escaped ampersand #9718 Unintentional grey bar after renaming a graph #9767 [Speedy] Incorrect n-triples serialization when handling blank nodes from external SPARQL endpoints #9557 [SPARQL-IDE] Yellow marker in SPARQL Editor is incorrect. This feature has been disabled until we improve some of the rough edges in future releases. #9791 [SPARQL-IDE] Error when tying to view a populated query from a gallery visualization with a query variable #9774 [SPARQL-IDE] Copy shortened URL doesn't work for webkit browsers 24.10.2 \u00b6 Release date: 2024-10-25 Issues fixed #9739 [Query jobs] Query job artifacts, exposed to system administrators, are incomplete #9740 [Query jobs] Cannot start a query job for queries with a default variable #9701 [Query jobs] A query job with invalid graph name fails at a late stage, where we expect it to fail early 24.10.1 \u00b6 Release date: 2024-10-11 Issues fixed #9695 [Speedy] ORDER BY doesn't order by the second operand if the first one is an error #9658 [SPARQL-IDE] Capture position doesn't work with height settings for stories 24.09.2 \u00b6 Release date: 2024-09-27 Features #8471 Admins on a TriplyDB instance can restrict users to only one active session #9148 [SPARQL-IDE] Added support for overlaying 3D tiles, from a URL, in the Geo visualization Issues fixed #9646 Base URI when uploading data always uses the uploading account instead of the target account #9561 Saving a new version of a saved query does not always save its visualization configuration #9600 When editing query metadata, the service \"Speedy\" is not always visible #9633 [Speedy] Regression, introduced in version 24.09.1, causing fatal error when executing certain queries 24.09.1 \u00b6 Features #9535 [SPARQL-IDE] The camera point-of-view can be saved as part of a query, in the geo visualization. 
This enables more precise use in stories #9527 [Speedy] Improved the user message when there are errors within a SHACL SPARQL function Issues fixed #9556 / #9550 [Speedy] Reordering of queries with both an OPTIONAL and ORDER BY clause did not always return the correct result #9579 [Speedy] Incorrect coordinate transformation from epsg:7415 to epsg:28992 when using geof:transform GeoSPARQL function #9528 [Speedy] Incorrect results when using a SHACL SPARQL function due to reordering #9569 [Speedy] Incorrect results when using a SHACL SPARQL function with blank nodes #9523 [SPARQL-IDE] Unable to export certain chart visualizations to SVG 24.08.2 \u00b6 Features #8929 Added the display name of the user who created a version to the list of query versions #9399 The class frequencies have been added to Graphs page of a dataset. The Class frequency tab under Insights is still available but is deprecated. This will be removed in a future release #9400 The Copy dataset button has been moved from the Dataset panel to a drop-down menu instead. This makes it consistent with actions on saved queries and stories Issues fixed #9565 Failing uploads aren't reported to users in real-time #9507 [SPARQL-IDE] The edit dialog of the Google chart visualization has incorrect styling #9271 [SPARQL-IDE] Incorrect coloring for SPARQL variables that are used after the WHERE clause and are not projected outward 24.08.1 \u00b6 Features #9388 Improved navigation of the admin settings pages #9453 [SPARQL-IDE] Increased the maximum pitch in the geo visualization #8495 [Speedy] Support more Coordinate Reference Systems Issues fixed #9483 When paginating over the results of a saved query, in certain cases the limit of a subselect is wrongfully removed 24.08.0 \u00b6 Issues fixed #9328 Captions to query visualizations in stories are too wide #9428 [SPARQL-IDE] API variables that are used in the query string are incorrectly colored 24.07.1 \u00b6 Features #8928 [SPARQL-IDE] Improved usability of the gallery visualization with customization of keywords #8587 [Speedy] Improved the performance of queries that use arbitrary length path matching by 5-100x #9035 [Speedy] Added support for federating to internal or private Speedy SPARQL endpoints on the same instance, given the necessary permissions #9380 Improved the look and feel of the dataset panel to accommodate upcoming features Issues fixed #9371 Changing a query in a data story does not update the query result #9385 [Speedy] Using SHACL SPARQL functions in combination with a LIMIT clause returned an error #9395 [Speedy] In a rare instance, query reordering produces incorrect results 24.07.0 \u00b6 Features #9191 [Speedy] Added support for using SHACL SPARQL functions defined within a dataset #7003 [Speedy] Added support for the x and q flags, in the regex() and replace() functions #9051 [SPARQL-IDE] Added a button to auto-format SPARQL queries Issues fixed #9330 Using saved query variables sometimes creates a syntactically invalid query #9173 Unclear error message when upload JSON-LD files with an invalid IRI 24.06.1 \u00b6 Features #9122 [Speedy] Added support for the units vocabulary in the GeoSPARQL functions geof:distance and geof:buffer Issues fixed #9298 Unable to upload files from a URL with many redirects #9312 SPARQL results in CSV and TSV formats incorrectly handle escape characters in literals #9237 [SPARQL-IDE] Repair messages do not signify their actions well enough #9122 [Speedy] GeoSPARQL functions geof:sfWithin , geof:sfOverlaps and geof:sfTouches did not 
support complex polygons 24.06.0 \u00b6 Issues fixed #9212 [SPARQL-IDE] Unused variables are sometimes displayed incorrectly on first load #9214 [SPARQL-IDE] Unexpected syntax error for a valid query 24.05.1 \u00b6 Features #9081 [SPARQL-IDE] Support for a JSON-LD visualization is now more prominent #9146 [SPARQL-IDE] Added informative hints for SPARQL queries that return 0 results Issues fixed #9083 Parallel LD-uploads cause conflicting graphs 24.05.0 \u00b6 Features #9063 [Speedy] Added support for geof:buffer GeoSPARQL functions Issues fixed #8960 Inconsistent formatting notation for numbers #9174 [SPARQL IDE] Saved Query editor crashes when a JSON-LD Frame is applied 24.04.1 \u00b6 Features #9136 [SPARQL-IDE] Added support for rendering gLTF 3D models #9059 - #9062 [Speedy] Added support for geof:distance , geof:minZ , geof:maxZ and geof:boundary GeoSPARQL functions Issues fixed #9114 TriplyDB reports valid IRIs as invalid when they contain atypical characters #9152 Virtuoso SPARQL service exhibits different querying behaviour after sync operation #9160 [SPARQL-IDE] Chart visualization not working for certain SPARQL responses 24.04.0 \u00b6 Features #9050 Added a button to display the available keyboard shortcuts for the SPARQL-IDE #9055 - #9058 [Speedy] Added support for geof:sfWithin , geof:sfOverlaps , geof:sfTouches and geof:aggBoundingBox GeoSPARQL functions Issues fixed #9048 [SPARQL-IDE] Non-string literals were not accepted as valid #9005 [SPARQL-IDE] Variable auto-complete not working in BIND clause #9068 [SPARQL-IDE] Query can be executed when there is no service available #9053 [SPARQL-IDE] Saved query view shows \"large result\" dialog when not applicable 24.03.1 \u00b6 Features #8580 Added saved query execution status in the TriplyDB administrator view #8798 Improved the performance when navigating to most queries and stories, by proactively caching query results #8580 [Speedy] Added support for xsd:negativeinteger , xsd:positiveInteger , xsd:nonNegativeInteger and xsd:nonPositiveInteger casting functions #8681 [Speedy] Improved query performance when federating to a public dataset on the same TriplyDB instance #9000 [SPARQL-IDE] Unreferenced variables are now identifiable by their colour #9028 [SPARQL-IDE] Improved the rendering of polygons on a map and map interactions on mobile devices Issues fixed #8601 [SPARQL-IDE] Unexpected auto-complete after operator #8954 [SPARQL-IDE] SPARQL IDE shows 2D data in tilted view #9004 [SPARQL-IDE] LD-frame editor doesn't show icon to display the entire query #9006 [SPARQL-IDE] Auto-indent references the next line instead of the previous line #9029 [SPARQL-IDE] Unactionable warning for some plugins in stories 24.03.0 \u00b6 Issues fixed #8600 [SPARQL-IDE] Automatically inserted brackets caused syntax errors #8999 [SPARQL-IDE] Editor inserts duplicate prefix declarations when comments are used #8780 [Speedy] Queries with LIMIT statements took longer to execute than expected 24.02.2 \u00b6 Features #8659 [SPARQL-IDE] Show a notification when a SPARQL result set contains unrecognized geographic shapes #8868 [Speedy] Improved the performance of some aggregates queries. #8834 / #8892 [SPARQL-IDE] More errors are now validated by the SPARQL IDE. For example, nested aggregates ( count(count(...)) ) now report as an error. #8834 TriplyDB supports query annotations. An TriplyDB SPARQL annotation looks like this: #! cache: false This annotation makes ensure that the TriplyDB cache is bypassed. 
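For illustration, the sketch below shows how such an annotation could be combined with a query. This is a minimal sketch: the placement of the annotation as a comment line at the top of the query text, and the example query itself, are assumptions based on the syntax shown above rather than part of this changelog entry.

```sparql
#! cache: false
# Hypothetical example query; the annotation above instructs TriplyDB to bypass its cache.
select * where {
  ?s ?p ?o.
}
limit 10
```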
Issues fixed #8913 [Speedy] Some arithmetic SPARQL functions return 0 results #8598 [SPARQL-IDE] Triggering context menu behaves odd when one is already open #8660 [SPARQL-IDE] QGIS does not recognized the an exported shapefile #8918 Some small services fail to consistently start 24.02.1 \u00b6 Features #8795 Support use of the attribute in markdown and HTML (used in dataset/account/query descriptions, or by the SPARQL IDE) #8796 Support different size dimensions for story elements Issues fixed #8720 Invalid saved-query variables are not validated in the stories UI #8792 [SPARQL-IDE] A combination of the pivot table and google charts visualization may not render #8779 [SPARQL-IDE] Multiline errors are not rendered correctly #8690 [Speedy] Some atypical queries with large group-by's may result in an error #8606 [Speedy] Some valid regular expressions throw an error #8765 [Speedy] Federating to virtuoso does not work in some cases #8749 Syncing a service may fail when there are many concurrent requests #8686 Syncing a service may fail after a service is renamed #8793 [SPARQL IDE] The gallery image-widget result (populated by the ?imageWidget variable) is not shown when printing a story 24.02.0 \u00b6 Features #7616 Render skolemized IRIs better #8728 [SPARQL IDE] Improved ui for rendering grouped geo shapes Issues fixed Speedy may return too many results when using a FROM clause #8695 #8602 #8603 #8770 [SPARQL IDE] Fixed UX issues with tabs and autocompletion 24.01.0 \u00b6 Features - #8502 [SPARQL IDE] Add confirmation mechanisms in the browser to avoid the browser rendering too much information. This avoids issues where the browser is rendered unresponsive Issues fixed #8584 Insufficient request validation using the saved-query API #8092 Dataset metadata may report wrong number of statements for atypical uploads #8364 Uploads with combinations of atypical invalid IRIs may result in an error #8697 [SPARQL IDE] Changing the network visualization may result in an client-side error #8588 Saved queries with an LD-Frame always show up as modified and in draft state 23.12.1 \u00b6 Features #4209 Add queries overview page for TriplyDB administrators #8494 Improve UX for service selection in saved queries by removing the option for selecting one specific service. This option was unintuitive and not recommended. Instead, using a service type is recommended (and not the only available option). #8558 #8556 :[SPARQL speedy] Improve performance of queries with filter not exists and optionals by 30% to 180%. Issues fixed #8584 Uninformative error message when using terms autocompletion API #8512 Uninformative error message when requesting elasticsearch mapping information during a sync operation 23.12.0 \u00b6 Features #8224 [SPARQL IDE] Replace the current SPARQL editor with SPARQL IDE. The new SPARQL IDE will be gradually enabled on all TriplyDB deployments. The editor and result visualization have a slightly different look Added shortcuts for powerusers (press -? on the SPARQL IDE page to show them) Performance improvements when writing larger queries Consolidated the visualizations: the geo 3d plugin is now combined with the regular geo plugin, and the markup visualization is now part of the gallery visualization. The new editor is backwards compatible with the old editor. The geo-events plugin (where geographic information can be rendered as a timeline) is deprecated and not present in the SPARQL IDE. 
#8420 #8457 [SPARQL speedy] Improve performance of most SPARQL queries with 40% to 200% #8504 Improve UX for service selection in saved queries: the type of a manually created service has precendence now over speedy #8456 Support in the UI for deleting all dataset assets #8481 Include link to changelog in the footer of all TriplyDB pages 23.11.1 \u00b6 Features #8266 Automatic date detection and indexing in Elasticsearch Issues fixed #8459 Unable to upload instance logo #8444 Virtuoso service becomes unresponsive for atypical SPARQL query #8414 [SPARQL speedy] Querying non-existent graph may result in an error #8415 [SPARQL speedy] Query with service clause and filter may result in an error #8256 Jena concistently fails to start for a specific dataset #8371 The LD-Browser does not render an image, even though an image is present in the describe result. 23.11.0 \u00b6 Features #8324 Added quick-actions for changing the saved-query access level from the stories page. These quick-actions are shown when the story access level is incompatible with the saved-query access level (e.g., the story is public, but a saved-query is private) #8308 [SPARQL Speedy] Support for the geof:area function #8309 [SPARQL Speedy] Support for the geof:transform function Issues fixed #8352 Setting custom mapping in Elasticsearch may result in default mappings getting ignored #8326 Setting invalid custom mappings for Elasticsearch results in uninformative error #8256 Jena concistently fails to start for a specific dataset #8371 The LD-Browser does not render an image, even though an image is present in the describe result.","title":"Changelog"},{"location":"triplydb-changelog/#24.12.2","text":"Release date: 2024-12-18 Issues fixed #10006 Cancelling a linked data upload may result in an error #9711 [SaaS] Incidental requests are dropped","title":"24.12.2"},{"location":"triplydb-changelog/#24.12.1","text":"Release date: 2024-12-05 Issues fixed #9888 [Query job] Query job for very simple construct query may fail #9959 [SPARQL IDE] SPARQL IDE may show same autocomplete suggestion multiple times #9954 [Query job] Query jobs may get sometimes inadvertently cancelled #9927 [Stories] Stories with many queries are slow to load in the browser","title":"24.12.1"},{"location":"triplydb-changelog/#24.11.2","text":"Release date: 2024-11-22 Issues fixed #9017 [Speedy] Some queries take too long when reordering/optimizing a query #9780 [SaaS] Enable OCSP stapling for TLS","title":"24.11.2"},{"location":"triplydb-changelog/#24.11.1","text":"Release date: 2024-11-08 Issues fixed #8878 Include correct dataset modification date for JSON schema descriptions #9694 Speedy SPARQL endpoints are not included in the dataset NDE descriptions #9807 Parsing of XML content assigns an incorrect datatype #9752 Parsing of XML is incorrect for escaped ampersand #9718 Unintentional grey bar after renaming a graph #9767 [Speedy] Incorrect n-triples serialization when handling blank nodes from external SPARQL endpoints #9557 [SPARQL-IDE] Yellow marker in SPARQL Editor is incorrect. This feature has been disabled until we improve some of the rough edges in future releases. 
#9791 [SPARQL-IDE] Error when tying to view a populated query from a gallery visualization with a query variable #9774 [SPARQL-IDE] Copy shortened URL doesn't work for webkit browsers","title":"24.11.1"},{"location":"triplydb-changelog/#24.10.2","text":"Release date: 2024-10-25 Issues fixed #9739 [Query jobs] Query job artifacts, exposed to system administrators, are incomplete #9740 [Query jobs] Cannot start a query job for queries with a default variable #9701 [Query jobs] A query job with invalid graph name fails at a late stage, where we expect it to fail early","title":"24.10.2"},{"location":"triplydb-changelog/#24.10.1","text":"Release date: 2024-10-11 Issues fixed #9695 [Speedy] ORDER BY doesn't order by the second operand if the first one is an error #9658 [SPARQL-IDE] Capture position doesn't work with height settings for stories","title":"24.10.1"},{"location":"triplydb-changelog/#24.09.2","text":"Release date: 2024-09-27 Features #8471 Admins on a TriplyDB instance can restrict users to only one active session #9148 [SPARQL-IDE] Added support for overlaying 3D tiles, from a URL, in the Geo visualization Issues fixed #9646 Base URI when uploading data always uses the uploading account instead of the target account #9561 Saving a new version of a saved query does not always save its visualization configuration #9600 When editing query metadata, the service \"Speedy\" is not always visible #9633 [Speedy] Regression, introduced in version 24.09.1, causing fatal error when executing certain queries","title":"24.09.2"},{"location":"triplydb-changelog/#24.09.1","text":"Features #9535 [SPARQL-IDE] The camera point-of-view can be saved as part of a query, in the geo visualization. This enables more precise use in stories #9527 [Speedy] Improved the user message when there are errors within a SHACL SPARQL function Issues fixed #9556 / #9550 [Speedy] Reordering of queries with both an OPTIONAL and ORDER BY clause did not always return the correct result #9579 [Speedy] Incorrect coordinate transformation from epsg:7415 to epsg:28992 when using geof:transform GeoSPARQL function #9528 [Speedy] Incorrect results when using a SHACL SPARQL function due to reordering #9569 [Speedy] Incorrect results when using a SHACL SPARQL function with blank nodes #9523 [SPARQL-IDE] Unable to export certain chart visualizations to SVG","title":"24.09.1"},{"location":"triplydb-changelog/#24.08.2","text":"Features #8929 Added the display name of the user who created a version to the list of query versions #9399 The class frequencies have been added to Graphs page of a dataset. The Class frequency tab under Insights is still available but is deprecated. This will be removed in a future release #9400 The Copy dataset button has been moved from the Dataset panel to a drop-down menu instead. 
This makes it consistent with actions on saved queries and stories Issues fixed #9565 Failing uploads aren't reported to users in real-time #9507 [SPARQL-IDE] The edit dialog of the Google chart visualization has incorrect styling #9271 [SPARQL-IDE] Incorrect coloring for SPARQL variables that are used after the WHERE clause and are not projected outward","title":"24.08.2"},{"location":"triplydb-changelog/#24.08.1","text":"Features #9388 Improved navigation of the admin settings pages #9453 [SPARQL-IDE] Increased the maximum pitch in the geo visualization #8495 [Speedy] Support more Coordinate Reference Systems Issues fixed #9483 When paginating over the results of a saved query, in certain cases the limit of a subselect is wrongfully removed","title":"24.08.1"},{"location":"triplydb-changelog/#24.08.0","text":"Issues fixed #9328 Captions to query visualizations in stories are too wide #9428 [SPARQL-IDE] API variables that are used in the query string are incorrectly colored","title":"24.08.0"},{"location":"triplydb-changelog/#24.07.1","text":"Features #8928 [SPARQL-IDE] Improved usability of the gallery visualization with customization of keywords #8587 [Speedy] Improved the performance of queries that use arbitrary length path matching by 5-100x #9035 [Speedy] Added support for federating to internal or private Speedy SPARQL endpoints on the same instance, given the necessary permissions #9380 Improved the look and feel of the dataset panel to accommodate upcoming features Issues fixed #9371 Changing a query in a data story does not update the query result #9385 [Speedy] Using SHACL SPARQL functions in combination with a LIMIT clause returned an error #9395 [Speedy] In a rare instance, query reordering produces incorrect results","title":"24.07.1"},{"location":"triplydb-changelog/#24.07.0","text":"Features #9191 [Speedy] Added support for using SHACL SPARQL functions defined within a dataset #7003 [Speedy] Added support for the x and q flags, in the regex() and replace() functions #9051 [SPARQL-IDE] Added a button to auto-format SPARQL queries Issues fixed #9330 Using saved query variables sometimes creates a syntactically invalid query #9173 Unclear error message when upload JSON-LD files with an invalid IRI","title":"24.07.0"},{"location":"triplydb-changelog/#24.06.1","text":"Features #9122 [Speedy] Added support for the units vocabulary in the GeoSPARQL functions geof:distance and geof:buffer Issues fixed #9298 Unable to upload files from a URL with many redirects #9312 SPARQL results in CSV and TSV formats incorrectly handle escape characters in literals #9237 [SPARQL-IDE] Repair messages do not signify their actions well enough #9122 [Speedy] GeoSPARQL functions geof:sfWithin , geof:sfOverlaps and geof:sfTouches did not support complex polygons","title":"24.06.1"},{"location":"triplydb-changelog/#24.06.0","text":"Issues fixed #9212 [SPARQL-IDE] Unused variables are sometimes displayed incorrectly on first load #9214 [SPARQL-IDE] Unexpected syntax error for a valid query","title":"24.06.0"},{"location":"triplydb-changelog/#24.05.1","text":"Features #9081 [SPARQL-IDE] Support for a JSON-LD visualization is now more prominent #9146 [SPARQL-IDE] Added informative hints for SPARQL queries that return 0 results Issues fixed #9083 Parallel LD-uploads cause conflicting graphs","title":"24.05.1"},{"location":"triplydb-changelog/#24.05.0","text":"Features #9063 [Speedy] Added support for geof:buffer GeoSPARQL functions Issues fixed #8960 Inconsistent formatting notation for numbers #9174 
[SPARQL IDE] Saved Query editor crashes when a JSON-LD Frame is applied","title":"24.05.0"},{"location":"triplydb-changelog/#24.04.1","text":"Features #9136 [SPARQL-IDE] Added support for rendering gLTF 3D models #9059 - #9062 [Speedy] Added support for geof:distance , geof:minZ , geof:maxZ and geof:boundary GeoSPARQL functions Issues fixed #9114 TriplyDB reports valid IRIs as invalid when they contain atypical characters #9152 Virtuoso SPARQL service exhibits different querying behaviour after sync operation #9160 [SPARQL-IDE] Chart visualization not working for certain SPARQL responses","title":"24.04.1"},{"location":"triplydb-changelog/#24.04.0","text":"Features #9050 Added a button to display the available keyboard shortcuts for the SPARQL-IDE #9055 - #9058 [Speedy] Added support for geof:sfWithin , geof:sfOverlaps , geof:sfTouches and geof:aggBoundingBox GeoSPARQL functions Issues fixed #9048 [SPARQL-IDE] Non-string literals were not accepted as valid #9005 [SPARQL-IDE] Variable auto-complete not working in BIND clause #9068 [SPARQL-IDE] Query can be executed when there is no service available #9053 [SPARQL-IDE] Saved query view shows \"large result\" dialog when not applicable","title":"24.04.0"},{"location":"triplydb-changelog/#24.03.1","text":"Features #8580 Added saved query execution status in the TriplyDB administrator view #8798 Improved the performance when navigating to most queries and stories, by proactively caching query results #8580 [Speedy] Added support for xsd:negativeinteger , xsd:positiveInteger , xsd:nonNegativeInteger and xsd:nonPositiveInteger casting functions #8681 [Speedy] Improved query performance when federating to a public dataset on the same TriplyDB instance #9000 [SPARQL-IDE] Unreferenced variables are now identifiable by their colour #9028 [SPARQL-IDE] Improved the rendering of polygons on a map and map interactions on mobile devices Issues fixed #8601 [SPARQL-IDE] Unexpected auto-complete after operator #8954 [SPARQL-IDE] SPARQL IDE shows 2D data in tilted view #9004 [SPARQL-IDE] LD-frame editor doesn't show icon to display the entire query #9006 [SPARQL-IDE] Auto-indent references the next line instead of the previous line #9029 [SPARQL-IDE] Unactionable warning for some plugins in stories","title":"24.03.1"},{"location":"triplydb-changelog/#24.03.0","text":"Issues fixed #8600 [SPARQL-IDE] Automatically inserted brackets caused syntax errors #8999 [SPARQL-IDE] Editor inserts duplicate prefix declarations when comments are used #8780 [Speedy] Queries with LIMIT statements took longer to execute than expected","title":"24.03.0"},{"location":"triplydb-changelog/#24.02.2","text":"Features #8659 [SPARQL-IDE] Show a notification when a SPARQL result set contains unrecognized geographic shapes #8868 [Speedy] Improved the performance of some aggregates queries. #8834 / #8892 [SPARQL-IDE] More errors are now validated by the SPARQL IDE. For example, nested aggregates ( count(count(...)) ) now report as an error. #8834 TriplyDB supports query annotations. An TriplyDB SPARQL annotation looks like this: #! cache: false This annotation makes ensure that the TriplyDB cache is bypassed. 
Issues fixed #8913 [Speedy] Some arithmetic SPARQL functions return 0 results #8598 [SPARQL-IDE] Triggering context menu behaves odd when one is already open #8660 [SPARQL-IDE] QGIS does not recognized the an exported shapefile #8918 Some small services fail to consistently start","title":"24.02.2"},{"location":"triplydb-changelog/#24.02.1","text":"Features #8795 Support use of the attribute in markdown and HTML (used in dataset/account/query descriptions, or by the SPARQL IDE) #8796 Support different size dimensions for story elements Issues fixed #8720 Invalid saved-query variables are not validated in the stories UI #8792 [SPARQL-IDE] A combination of the pivot table and google charts visualization may not render #8779 [SPARQL-IDE] Multiline errors are not rendered correctly #8690 [Speedy] Some atypical queries with large group-by's may result in an error #8606 [Speedy] Some valid regular expressions throw an error #8765 [Speedy] Federating to virtuoso does not work in some cases #8749 Syncing a service may fail when there are many concurrent requests #8686 Syncing a service may fail after a service is renamed #8793 [SPARQL IDE] The gallery image-widget result (populated by the ?imageWidget variable) is not shown when printing a story","title":"24.02.1"},{"location":"triplydb-changelog/#24.02.0","text":"Features #7616 Render skolemized IRIs better #8728 [SPARQL IDE] Improved ui for rendering grouped geo shapes Issues fixed Speedy may return too many results when using a FROM clause #8695 #8602 #8603 #8770 [SPARQL IDE] Fixed UX issues with tabs and autocompletion","title":"24.02.0"},{"location":"triplydb-changelog/#24.01.0","text":"Features - #8502 [SPARQL IDE] Add confirmation mechanisms in the browser to avoid the browser rendering too much information. This avoids issues where the browser is rendered unresponsive Issues fixed #8584 Insufficient request validation using the saved-query API #8092 Dataset metadata may report wrong number of statements for atypical uploads #8364 Uploads with combinations of atypical invalid IRIs may result in an error #8697 [SPARQL IDE] Changing the network visualization may result in an client-side error #8588 Saved queries with an LD-Frame always show up as modified and in draft state","title":"24.01.0"},{"location":"triplydb-changelog/#23.12.1","text":"Features #4209 Add queries overview page for TriplyDB administrators #8494 Improve UX for service selection in saved queries by removing the option for selecting one specific service. This option was unintuitive and not recommended. Instead, using a service type is recommended (and not the only available option). #8558 #8556 :[SPARQL speedy] Improve performance of queries with filter not exists and optionals by 30% to 180%. Issues fixed #8584 Uninformative error message when using terms autocompletion API #8512 Uninformative error message when requesting elasticsearch mapping information during a sync operation","title":"23.12.1"},{"location":"triplydb-changelog/#23.12.0","text":"Features #8224 [SPARQL IDE] Replace the current SPARQL editor with SPARQL IDE. The new SPARQL IDE will be gradually enabled on all TriplyDB deployments. The editor and result visualization have a slightly different look Added shortcuts for powerusers (press -? on the SPARQL IDE page to show them) Performance improvements when writing larger queries Consolidated the visualizations: the geo 3d plugin is now combined with the regular geo plugin, and the markup visualization is now part of the gallery visualization. 
The new editor is backwards compatible with the old editor. The geo-events plugin (where geographic information can be rendered as a timeline) is deprecated and not present in the SPARQL IDE. #8420 #8457 [SPARQL speedy] Improve performance of most SPARQL queries with 40% to 200% #8504 Improve UX for service selection in saved queries: the type of a manually created service has precendence now over speedy #8456 Support in the UI for deleting all dataset assets #8481 Include link to changelog in the footer of all TriplyDB pages","title":"23.12.0"},{"location":"triplydb-changelog/#23.11.1","text":"Features #8266 Automatic date detection and indexing in Elasticsearch Issues fixed #8459 Unable to upload instance logo #8444 Virtuoso service becomes unresponsive for atypical SPARQL query #8414 [SPARQL speedy] Querying non-existent graph may result in an error #8415 [SPARQL speedy] Query with service clause and filter may result in an error #8256 Jena concistently fails to start for a specific dataset #8371 The LD-Browser does not render an image, even though an image is present in the describe result.","title":"23.11.1"},{"location":"triplydb-changelog/#23.11.0","text":"Features #8324 Added quick-actions for changing the saved-query access level from the stories page. These quick-actions are shown when the story access level is incompatible with the saved-query access level (e.g., the story is public, but a saved-query is private) #8308 [SPARQL Speedy] Support for the geof:area function #8309 [SPARQL Speedy] Support for the geof:transform function Issues fixed #8352 Setting custom mapping in Elasticsearch may result in default mappings getting ignored #8326 Setting invalid custom mappings for Elasticsearch results in uninformative error #8256 Jena concistently fails to start for a specific dataset #8371 The LD-Browser does not render an image, even though an image is present in the describe result.","title":"23.11.0"},{"location":"triplydb-js/","text":"On this page: Overview TriplyDB.js is the official programming library for interacting with TriplyDB . TriplyDB.js allows you to automate operations that would otherwise be performed in the TriplyDB GUI. TriplyDB.js is implemented in TypeScript . TypeScript is a type-safe language that transpiles to JavaScript . This allows you to use TriplyDB.js in web browsers as well as on servers (using Node.js ). TriplyDB.js is open source and its source code is published on GitHub . Please contact support@triply.cc for questions and suggestions. Overview \u00b6 TriplyDB.js contains several classes, each with their own methods. The documentation for every method includes at least one code example. These code examples can be run by inserting them into the following overall script. Notice that process.env.TOKEN picks up an API token that is stored in the environment variable called TOKEN . Follow the steps on this page to create a new API token in the TriplyDB GUI. require('source-map-support/register') import App from '@triply/triplydb' const triply = App.get({ token: process.env.TOKEN }) async function run() { // This is where the code examples in this reference section should be placed. } run().catch(e => { console.error(e) process.exit(1) }) process.on('uncaughtException', function (e) { console.error('Uncaught exception', e) process.exit(1) }) process.on('unhandledRejection', (reason, p) => { console.error('Unhandled Rejection at: Promise', p, 'reason:', reason) process.exit(1) }) The following sections document the various TriplyDB.js classes. 
Each class comes with its own methods. Classes are related through methods that connect them. For example, calling the getAccount method on a App object returns an Account object.","title":"Overview"},{"location":"triplydb-js/#overview","text":"TriplyDB.js contains several classes, each with their own methods. The documentation for every method includes at least one code example. These code examples can be run by inserting them into the following overall script. Notice that process.env.TOKEN picks up an API token that is stored in the environment variable called TOKEN . Follow the steps on this page to create a new API token in the TriplyDB GUI. require('source-map-support/register') import App from '@triply/triplydb' const triply = App.get({ token: process.env.TOKEN }) async function run() { // This is where the code examples in this reference section should be placed. } run().catch(e => { console.error(e) process.exit(1) }) process.on('uncaughtException', function (e) { console.error('Uncaught exception', e) process.exit(1) }) process.on('unhandledRejection', (reason, p) => { console.error('Unhandled Rejection at: Promise', p, 'reason:', reason) process.exit(1) }) The following sections document the various TriplyDB.js classes. Each class comes with its own methods. Classes are related through methods that connect them. For example, calling the getAccount method on a App object returns an Account object.","title":"Overview"},{"location":"triplydb-js/account/","text":"On this page: Account Account.addDataset(name: string, metadata?: object) Access restrictions Arguments Examples See also Account.addQuery(name: string, metadata: object) Arguments Example Account.addStory(name: string, metadata?: object) Required Optional Examples Account.asOrganization() Examples Alternatives See also Account.asUser() Examples Alternatives See also Account.ensureDataset(name: string, metadata?: object) Example See also Account.getDataset(name: string) Examples See also Account.getDatasets() Access restrictions Examples Account.getInfo() Examples Account.getPinnedItems() Order considerations Examples See also Account.getQuery(name: string) Examples See also Account.getQueries() Access restrictions Examples See also Account.ensureStory(name: string, metadata: object) Optional Account.addStory(name: string, newStoryOptions?: object) Required Optional Account.getStory(name: string) Examples See also Account.getStories() Examples See also Account.pinItems(items: array[Dataset|Story|Query]) Account.setAvatar(file: string) Examples Account.update(metadata: object) Account \u00b6 Instances of the Account class denote TriplyDB accounts. Accounts can be either organizations ( Organization ) or users ( User ). Account objects are obtained by calling the following method: App.getAccount(name?: string) Account.addDataset(name: string, metadata?: object) \u00b6 Adds a new TriplyDB dataset with the given name to the current account. The optional metadata argument is used to specify the metadata for the dataset. Access restrictions \u00b6 Creating a new dataset only succeeds if an API token is configured that provides write access to the current account. The default access level for a newly created dataset is private . If you want to publish a dataset with a different access level, you must specify the accessLevel key in the metadata argument. Arguments \u00b6 The name argument specifies the URL-friendly name of the new dataset. The name must only contain alphanumeric characters and hyphens ( [A-Za-z0-9\\-] ). 
The full URL of the newly created dataset has the following structure: https://{host}/{account}/{dataset} The metadata argument optionally specifies the access level and other important metadata: accessLevel The access level of the dataset. The following values are supported: 'private' (default) The dataset can only be accessed by organization members. 'internal' The dataset can only be accessed by users that are logged into the TriplyDB server. 'public' The dataset can be accessed by everybody. When no access level is specified, the most restrictive access level ( private ) is used. description The human-readable description of the dataset. This description can make use of Markdown. displayName The human-readable name of the dataset. This name may contain spaces and other characters that are not allowed in the URL-friendly name. license The license of the dataset. The following license strings are currently supported: 'CC-BY-SA' 'CC0 1.0' 'GFDL' 'ODC-By' 'ODC-ODbL' 'PDDL' 'None' (default) prefixes The IRI prefix declarations that are configured for the dataset. This is specified as a dictionary object whose keys are aliases and whose values are IRI prefixes. Examples \u00b6 The following snippet creates a new dataset called 'iris' under the account called 'Triply' : The dataset has private access, because the access level is not specified explicitly. The dataset has a description. The dataset has a display name. The dataset has the PDDL license. const account = await triply.getAccount('Triply') const dataset = await account.addDataset('iris', { description: 'A multivariate dataset that quantifies morphologic variation of Iris flowers.', displayName: 'Iris', license: 'PDDL', name: 'iris', prefixes: { def: 'https://triplydb.com/Triply/iris/def/', id: 'https://triplydb.com/Triply/iris/id/', }, }) See also \u00b6 This method returns a dataset object. See the Dataset section for an overview of the methods that can be called on such objects. Account.addQuery(name: string, metadata: object) \u00b6 Adds a new SPARQL query to the account. Arguments \u00b6 Required: name: string The URL-friendly name of the new query. The name must only contain alphanumeric characters and hyphens ( [A-Za-z0-9\\-] ). dataset: Dataset An instance of class Dataset that the current API token gives access to. serviceType: \"speedy\" | \"virtuoso\" | \"jena\" | \"blazegraph\" A service type that will be used as a preferred SPARQL service to execute the query against. If serviceType is \"virtuoso\", \"jena\" or \"blazegraph\", that service is expected to exist in the dataset before the query can be successfully run. See Dataset queryString: string The SPARQL query string (e.g., 'select * { ?s ?p ?o }' ). Optional: accessLevel The access level of the query. If none is set it defaults to 'private' . The following values are supported: 'private' The query can only be accessed by the Account object for which it is created. 'internal' The query can only be accessed by people who are logged into the TriplyDB server. 'public' The query can be accessed by everybody. description: string A human-readable description of the query. displayName: string The human-readable name of the query. This name may include spaces and other characters that are not allowed in the URL-friendly name . output: string The visualization plugin that is used to display the result set of the query. If none is set it will either set ldFrame if provided or default to table . 'boolean' The boolean view is a special view for ask queries. 
The value is either 'true' or 'false', and is visualized as `X` (False) or `V` (True). 'gallery' The gallery view allows SPARQL results to be displayed in an HTML gallery. 'gchart' The gchart renders geographical, temporal and numerical data in interactive charts such as bar-, line- and pie charts. 'geo' The geo allows SPARQL results that contain GeoSPARQL semantics to be automatically interpreted and displayed on a 2D map. 'geoEvents' The geoEvents plugin renders geographical events as a story map. 'geo3d' The geo3d allows SPARQL results that contain GeoSPARQL semantics to be automatically interpreted and displayed on a 3D globe. 'markup' The markup can be used to render a variety of markup languages. This requires the use of the `?markup` variable to identify which variable to render. 'network' The network renders SPARQL Construct results in a graph representation. The maximum number of results that can be visualized is 1,000, for performance reasons. 'pivot' The pivot view renders SPARQL results in an interactive pivot table where you are able to aggregate the results by dragging your binding variables to columns or rows. 'response' The response view shows the body of the response and offers an easy way to download the result as a file. 'table' The table view allows SPARQL results to be displayed in a table. Each column in the table corresponds to a variable that belongs to the outer projection. 'timeline' The timeline view renders the SPARQL results on a timeline. ldFrame: object JSON LD frame object used to transform plain JSON LD into a framed JSON. Will be used only if an output is not provided. variables: Variable[] A list of objects with the following keys: IRI variable An object of the form Variable (see below) Instances of Variable are objects that can have the following keys: Required: name: string A SPARQL variable name. The variable name must appear in the query string. The question mark ( ? ) or dollar sign ( $ ) is not included. termType: 'Literal'|'NamedNode' The kind of variable. This must be either 'Literal' for literals or 'NamedNode' for IRIs. Optional: allowedValues: string[] The list of string values that are allowed for this variable. datatype: string (if termType='Literal' ) The datatype IRI for the literal variable. language: string (if termType='Literal' ) The language tag for the literal variable. Setting this implies that the datatype IRI is rdf:langString . defaultValue: string The default string value for this variable. required: boolean Whether a query request must include an explicit value for this variable. The default value is false . Example \u00b6 The following snippet creates a query with the given query string: const triply = App.get({ token: process.env.TOKEN }) const user = await triply.getUser() const myDataset = await user.getDataset('my-dataset') const query = await user.addQuery('my-query', { dataset: myDataset, serviceType: \"speedy\", queryString: 'select (count(*) as ?n) { ?s ?p ?o. }', output: 'response', }) Account.addStory(name: string, metadata?: object) \u00b6 Adds a new data story. Required \u00b6 name: string The URL-friendly name of the data story. The name must only contain alphanumeric characters and hyphens ( [A-Za-z0-9\\-] ). Optional \u00b6 accessLevel The access level of the data story. If none is given the default of 'private' is used. The following values are supported: 'private' The data story can only be accessed by the Account object for which it is created.
'internal' The data story can only be accessed by people who are logged into the TriplyDB server. 'public' The data story can be accessed by everybody. content: StoryElementUpdate[] A list of story elements. The building blocks of the Story. displayName: string The human-readable name of the data story. This name may include spaces and other characters that are not allowed in the URL-friendly name. A story element is an object with the following keys: caption: string The caption is an explanatory text about a specific query. id: string Each Story element gets an Id when it is created. When you want to update a Story element you will need this Id. The Id is only required when updating an element and not needed when adding an object. paragraph: string The Markdown content of a story paragraph. Only allowed when the type is set to 'paragraph' . query: Query An instance of class Query . queryVersion: number The version of the specified query that is used. type Either 'paragraph' or 'query' . Examples \u00b6 Example 1 - creates a new story that has access level 'private' : const triply = App.get({ token: process.env.TOKEN }) const user = await triply.getUser() const newStory = await user.addStory('name-of-story') Example 2 - creates a new story that has access level 'public' : const triply = App.get({ token: process.env.TOKEN }) const user = await triply.getUser() const newStory = await user.addStory('name-of-story', { accessLevel: 'public', }) Account.asOrganization() \u00b6 Casts the TriplyDB account object to its corresponding organization object. Class Organization is a specialization of class Account . Calling this method on an Organization object does nothing. Examples \u00b6 The following snippet retrieves the account named 'Triply' and casts it to an organization: const account = await triply.getAccount('Triply') const organization = account.asOrganization() Alternatives \u00b6 This method is not needed if the organization is directly retrieved with the specialization method App.getOrganization(name: string) . The following snippet returns the same result as the above example, but in a more direct way: const organization = await triply.getOrganization('Triply') See also \u00b6 This method returns an organization object. See class Organization for an overview of the methods that can be called on such objects. Account.asUser() \u00b6 Casts the TriplyDB account object to its corresponding user object. Class User is a specialization of class Account . Calling this method on a User object does nothing. Examples \u00b6 The following snippet retrieves the account that represents the current user, and casts it to a user object: const account = await triply.getAccount() const user = account.asUser() Alternatives \u00b6 This method is not needed if the user is directly retrieved with the specialization method App.getUser(name?: string) . The following snippet returns the same result as the above example, but in a more direct way: const user = await triply.getUser() See also \u00b6 This method returns a user object. See class User for an overview of the methods that can be called on such objects. Account.ensureDataset(name: string, metadata?: object) \u00b6 Ensures the existence of a dataset with the given name and with the specified metadata, if given. Calling this method ensures that the necessary changes (if any) are made in the connected-to TriplyDB server that result in an end state in which a dataset with the given name and metadata exists.
This method is useful in practice, because it removes the burden on the programmer to write custom code that checks for the existence of a dataset, and that conditionally creates a new dataset or makes metadata changes to an existing dataset. The changes made as a result of calling this method depend on the current state of the connected-to TriplyDB server: If this account does not yet have a dataset with the given name , then the behavior is identical to calling Account.addDataset(name: string, metadata?: object) with the same arguments. If this account already has a dataset with the given name and with the same metadata , then this method makes no changes. Example \u00b6 const account = await triply.getAccount('Triply') const myDataset = await account.ensureDataset(`my-dataset`, { license: 'PDDL', }) See also \u00b6 The meanings of the arguments to this method are identical to those of the Account.addDataset(name: string, metadata?: object) method. Account.getDataset(name: string) \u00b6 Returns the dataset with the given name that is published by this account. Examples \u00b6 The following snippet prints the name of the Iris dataset that is published by the Triply account: const account = await triply.getAccount('Triply') const dataset = await account.getDataset('iris') console.log((await dataset.getInfo()).name) See also \u00b6 This method returns a dataset object. See class Dataset for an overview of the methods that can be called on such objects. Account.getDatasets() \u00b6 Returns an async iterator over the accessible datasets for the current account. Access restrictions \u00b6 The iterator only includes datasets that are accessible for the current connection with a TriplyDB server: If no API token is configured, the iterator will include all and only public datasets belonging to this account. If an API token is configured, the iterator will include all public and internal datasets belonging to this account, and will include all private datasets belonging to this account if the API token gives read access to the account. Examples \u00b6 The following snippet prints the names of all accessible datasets under the Triply account: const account = await triply.getAccount('Triply') for await (const dataset of account.getDatasets()) { console.log((await dataset.getInfo()).name) } The following snippet prints an array of all accessible dataset objects under the Triply account: const account = await triply.getAccount('Triply') console.log(await account.getDatasets().toArray()) Account.getInfo() \u00b6 Returns information about this account. Information is returned in a dictionary object. Individual keys can be accessed for specific information values. The information object for accounts includes the following keys: avatarUrl A URL to the account image. accountName The URL-friendly name of the account. name The human-readable display name of the account. description The human-readable description of the account. createdAt The date and time on which the account was created. datasetCount The number of datasets for the account. queryCount The number of queries for the account. storyCount The number of stories for the account. pinnedDatasets An array containing the pinned datasets for the account. pinnedItems An array containing the pinned items (datasets, stories and queries) for the account. type The account type: either organization or user . role The role of the account. orgs An array of organizations of which the account is a member. Email address The email address of the account.
updatedAt The date and time on which the account was last updated. lastActivity The date and time on which the account was last online on TriplyDB. Examples \u00b6 The following snippet prints the full information object for the account called \u2018Triply\u2019: const account = await triply.getAccount('Triply') console.log(await account.getInfo()) The output for this snippet can look as follows: { 'accountName': 'Triply', 'avatarUrl': 'https://www.gravatar.com/avatar/9bc28997dd1074e405e1c66196d5e117?d=mm', 'createdAt': 'Mon Mar 19 2018 14:39:18 GMT+0000 (Coordinated Universal Time)', 'datasetCount': 16, 'name': 'Triply', 'queryCount': 37, 'storyCount': 7, 'type': 'org', 'updatedAt': 'Tue Nov 27 2018 09:29:38 GMT+0000 (Coordinated Universal Time)' } The following snippet prints the name of the account called \u2018Triply\u2019: const account = await triply.getAccount('Triply') console.log((await account.getInfo()).name) Account.getPinnedItems() \u00b6 Returns the list of datasets, stories and queries that are pinned for the current account. A pinned item is an item that is displayed in a prominent way on the account web page. Order considerations \u00b6 The order in which the pinned items are returned reflects the order in which they appear on the organization homepage (from top-left to bottom-right). Examples \u00b6 The following snippet prints the names of the items that are pinned on the Triply account page: const account = await triply.getAccount('Triply') for await (const item of account.getPinnedItems()) { console.log((await item.getInfo()).name) } See also \u00b6 This method returns various types of objects. Each class has different functionalities: See class Dataset for an overview of the methods for dataset objects. See class Query for an overview of the methods for query objects. See class Story for an overview of the methods for story objects. Account.getQuery(name: string) \u00b6 Returns the TriplyDB query with the given name . Examples \u00b6 The following snippet prints the query string for a query called animal-gallery that belongs to the account called Triply : const account = await triply.getAccount('Triply') const query = await account.getQuery('animal-gallery') console.log((await query.getInfo()).requestConfig?.payload.query) See also \u00b6 See class Query for an overview of the methods for query objects. Account.getQueries() \u00b6 Returns an async iterator over the accessible queries that belong to the account. Access restrictions \u00b6 The iterator only includes queries that are accessible for the current connection with a TriplyDB server: If no API token is configured, the iterator will include all and only public queries belonging to this account. If an API token is configured, the iterator will include all public and internal queries that belong to this account, and will include all private queries that belong to this account if the API token gives read access to the account. Examples \u00b6 The following snippet prints the names of the queries that belong to the account called Triply : const account = await triply.getAccount('Triply') for await (const query of account.getQueries()) { console.log((await query.getInfo()).name) } See also \u00b6 See class Query for an overview of the methods for query objects. Account.ensureStory(name: string, metadata: object) \u00b6 Ensures the existence of a story with the given name and with the specified metadata , if given.
Calling this method ensures that the necessary changes (if any) are made in the connected-to TriplyDB server that result in an end state in which a story with the given name and metadata exists. This method is useful in practice, because it removes the burden on the programmer to write custom code that checks for the existence of a story, and that conditionally creates a new story or makes metadata changes to an existing story. The changes made as a result of calling this method depend on the current state of the connected-to TriplyDB server: If this account does not yet have a story with the given name , then the behavior is identical to calling Account.addStory(name: string, metadata?: object) with the same arguments. If this account already has a story with the given name and with the same metadata , then this method returns that story. Optional \u00b6 displayName Accepts a string value to be used as the display name for the story. accessLevel Accepts either of the following values: 'private' (default), 'internal' , 'public' . content Accepts a list of StoryElementUpdate objects, defined below. Note: If no accessLevel is specified, the default used is 'private'. Examples Example 1: Ensuring a story only requires a name of type string. Its access level will default to 'private' : await someUser.ensureStory(`someStoryName`) Example 2: Ensures a story, setting its accessLevel and displayName : await someUser.ensureStory(`someStoryName`, { accessLevel: 'public', displayName: `This is a Story`, }) Account.addStory(name: string, newStoryOptions?: object) \u00b6 Required \u00b6 Adds and returns the TriplyDB story with the given name . Optional \u00b6 The optional new story object that can be passed accepts the following properties: displayName Accepts a string value to be used as a display name for the story. accessLevel Sets the access level for the story. Accepts either of the following: 'private' (default), 'internal' , 'public' . If no accessLevel is specified, the default value 'private' is used. Examples : Example 1 - creates a new story that is 'private' : const newStory = await someUser.addStory('name-of-story') Example 2 - creates a new story that is 'public' : const newStory = await someUser.addStory('name-of-story', { accessLevel: 'public', }) Account.getStory(name: string) \u00b6 Returns the TriplyDB story with the given name . Examples \u00b6 The following snippet retrieves the story called the-iris-dataset that is published under the account called Triply . Stories are sequences of paragraphs and queries, which appear in the story in a fixed order. const account = await triply.getAccount('Triply') const story = await account.getStory('the-iris-dataset') See also \u00b6 See class Story for an overview of the methods for story objects. Account.getStories() \u00b6 Returns an async iterator over the TriplyDB stories that belong to the account. Examples \u00b6 The following snippet prints the names of the stories that belong to the Triply account: const account = await triply.getAccount('Triply') for await (const story of account.getStories()) { console.log((await story.getInfo()).name) } See also \u00b6 See class Story for an overview of the methods for story objects. Account.pinItems(items: array[Dataset|Story|Query]) \u00b6 Pins the given datasets, stories, and/or queries to the home page of this account. The pinned elements can be seen by people who visit the account online. They are also included in the account metadata. The following snippet pins a query and a story to the current user's account page:
const triply = App.get({ token: process.env.TOKEN }) const user = await triply.getUser() const query = await user.getQuery('name-of-query') const newStory = await user.getStory('name-of-story') user.pinItems([query,newStory]) Account.setAvatar(file: string) \u00b6 Sets a new image that characterizes this account. A circular version of this image is displayed inside the TriplyDB GUI. This image is also published as part of account metadata. Examples \u00b6 The following snippet uploads the local image in file logo.svg and set it as the characterizing image for the Triply account: const account = await triply.getAccount('Triply') await account.setAvatar('logo.svg') Account.update(metadata: object) \u00b6 Updates the metadata for this account. To update the metadata profile with information within the metadata itself, we need the following steps: Obtain the relevant piece of information as a variable/const: getObject() Update the metadata profile with the obtained information stored in the variable/const: update() getObject() Define a constant ( const ) and assign it to ctx.store.getObjects() . The arguments for the function will be the subject, predicate, and graph. The function retrieves the object so the other 3 parts of a quad need to be specified. update() Update the relevant part of the metadata profile with the corresponding piece of information. .update({}) Example If one wants to update the display name of a metadata profile with the object of the following triple within the metadata: 'Example Name'@en async (ctx) => { // Fetch displayName const displayName = ctx.store .getObjects( 'https://example.org/example', 'https://schema.org/name', graph.metadata ) .find( (node) => node.termType === 'Literal' && node.language === 'en' )?.value; // Specify the environment variable, if necessary const _dataset = process.env['MODE'] === 'Production' ? (await app.triplyDb.getOrganization(organization)).getDataset(dataset) : (await app.triplyDb.getUser()).getDataset(organization + '-' + dataset) // Update the display name if (displayName) await (await _dataset).update({ displayName }) }; The metadata object for accounts can include the following keys: accountName The URL-friendly name of the account. name The human-readable display name of the account description The human-readable description of the account. pinnedItems An array containing the pinned items (datasets, stories and queries) for the account. Email address The email address of the account.","title":"Account"},{"location":"triplydb-js/account/#account","text":"Instances of the Account class denote TriplyDB accounts. Accounts can be either organizations ( Organization ) or users ( User ). Account objects are obtained by calling the following method: App.getAccount(name?: string)","title":"Account"},{"location":"triplydb-js/account/#accountadddatasetname-string-metadata-object","text":"Adds a new TriplyDB dataset with the given name to the current account. The optional metadata argument is used to specify the metadata for the dataset.","title":"Account.addDataset(name: string, metadata?: object)"},{"location":"triplydb-js/account/#access-restrictions","text":"Creating a new dataset only succeeds if an API token is configured that provides write access to the current account. The default access level for a newly created dataset is private . 
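For example, the following call (a minimal sketch, using a hypothetical dataset name) creates a dataset that only the creating account can access: const account = await triply.getAccount('Triply') const dataset = await account.addDataset('my-new-dataset')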
If you want to publish a dataset with a different access level, you must specify the accessLevel key in the metadata argument.","title":"Access restrictions"},{"location":"triplydb-js/account/#arguments","text":"The name argument specifies the URL-friendly name of the new dataset. The name must only contain alphanumeric characters and hyphens ( [A-Za-z0-9\\-] ). The full URL of the newly created dataset has the following structure: https://{host}/{account}/{dataset} The metadata argument optionally specifies the access level and other important metadata: accessLevel The access level of the dataset. The following values are supported: 'private' (default) The dataset can only be accessed by organization members. 'internal' The dataset can only be accessed by users that are logged into the TriplyDB server. 'public' The dataset can be accessed by everybody. When no access level is specified, the most restrictive access level ( private ) is used. description The human-readable description of the dataset. This description can make use of Markdown. displayName The human-readable name of the dataset. This name may contain spaces and other characters that are not allowed in the URL-friendly name. license The license of the dataset. The following license strings are currently supported: 'CC-BY-SA' 'CC0 1.0' 'GFDL' 'ODC-By' 'ODC-ODbL' 'PDDL' 'None' (default) prefixes The IRI prefix declarations that are configured for the dataset. This is specified as a dictionary object whose keys are aliases and whose values are IRI prefixes.","title":"Arguments"},{"location":"triplydb-js/account/#examples","text":"The following snippet creates a new dataset called 'iris' under the account called 'Triply' : The dataset has private access, because the access level is not specified explicitly. The dataset has a description. The dataset has a display name. The dataset has the PDDL license. const account = await triply.getAccount('Triply') const dataset = await account.addDataset('iris', { description: 'A multivariate dataset that quantifies morphologic variation of Iris flowers.', displayName: 'Iris', license: 'PDDL', name: 'iris', prefixes: { def: 'https://triplydb.com/Triply/iris/def/', id: 'https://triplydb.com/Triply/iris/id/', }, })","title":"Examples"},{"location":"triplydb-js/account/#see-also","text":"This method returns a dataset object. See the Dataset section for an overview of the methods that can be called on such objects.","title":"See also"},{"location":"triplydb-js/account/#accountaddqueryname-string-metadata-object","text":"Adds a new SPARQL query to the account.","title":"Account.addQuery(name: string, metadata: object)"},{"location":"triplydb-js/account/#arguments_1","text":"Required: name: string The URL-friendly name of the new query. The name must only contain alphanumeric characters and hyphens ( [A-Za-z0-9\\-] ). dataset: Dataset An instance of class Dataset that the current API token gives access to. serviceType: \"speedy\" | \"virtuoso\" | \"jena\" | \"blazegraph\" A service type that will be used as a preferred SPARQL service to execute the query against. If serviceType is \"virtuoso\", \"jena\" or \"blazegraph\", that service is expected to exist in the dataset before the query can be successfully run. See Dataset queryString: string The SPARQL query string (e.g., 'select * { ?s ?p ?o }' ). Optional: accessLevel The access level of the query. If none is set it defaults to 'private' . 
The following values are supported: 'private' The query can only be accessed by the Account object for which it is created. 'internal' The query can only be accessed by people who are logged into the TriplyDB server. 'public' The query can be accessed by everybody. description: string A human-readable description of the query. displayName: string The human-readable name of the query. This name may include spaces and other characters that are not allowed in the URL-friendly name . output: string The visualization plugin that is used to display the result set of the query. If none is set, it will either use ldFrame if provided, or default to table . 'boolean' The boolean view is a special view for ask queries. The value is either 'true' or 'false', and is visualized as `X` (False) or `V` (True). 'gallery' The gallery view allows SPARQL results to be displayed in an HTML gallery. 'gchart' The gchart renders geographical, temporal and numerical data in interactive charts such as bar-, line- and pie charts. 'geo' The geo allows SPARQL results that contain GeoSPARQL semantics to be automatically interpreted and displayed on a 2D map. 'geoEvents' The geoEvents plugin renders geographical events as a story map. 'geo3d' The geo3d allows SPARQL results that contain GeoSPARQL semantics to be automatically interpreted and displayed on a 3D globe. 'markup' The markup can be used to render a variety of markup languages. This requires the use of the `?markup` variable to identify which variable to render. 'network' The network renders SPARQL Construct results in a graph representation. The maximum number of results that can be visualized is 1,000, for performance reasons. 'pivot' The pivot view renders SPARQL results in an interactive pivot table where you are able to aggregate the results by dragging your binding variables to columns or rows. 'response' The response view shows the body of the response and offers an easy way to download the result as a file. 'table' The table view allows SPARQL results to be displayed in a table. Each column in the table corresponds to a variable that belongs to the outer projection. 'timeline' The timeline view renders the SPARQL results on a timeline. ldFrame: object A JSON-LD frame object used to transform plain JSON-LD into framed JSON-LD. It is only used if no output is provided. variables: Variable[] A list of objects of the form Variable (see below). Instances of Variable are objects that can have the following keys: Required: name: string A SPARQL variable name. The variable name must appear in the query string. The question mark ( ? ) or dollar sign ( $ ) is not included. termType: 'Literal'|'NamedNode' The kind of variable. This must be either 'Literal' for literals or 'NamedNode' for IRIs. Optional: allowedValues: string[] The list of string values that is allowed for this variable. datatype: string (if termType='Literal' ) The datatype IRI for the literal variable. language: string (if termType='Literal' ) The language tag for the literal variable. Setting this implies that the datatype IRI is rdf:langString . defaultValue: string The default string value for this variable. required: boolean Whether a query request must include an explicit value for this variable.
The default value is false .","title":"Arguments"},{"location":"triplydb-js/account/#example","text":"The following snippet creates a query with the given query string: const triply = App.get({ token: process.env.TOKEN }) const user = await triply.getUser() const myDataset = await user.getDataset('my-dataset') const query = await user.addQuery('my-query', { dataset: myDataset, serviceType: \"speedy\", queryString: 'select (count(*) as ?n) { ?s ?p ?o. }', output: 'response', })","title":"Example"},{"location":"triplydb-js/account/#accountaddstoryname-string-metadata-object","text":"Adds a new data story.","title":"Account.addStory(name: string, metadata?: object)"},{"location":"triplydb-js/account/#required","text":"name: string The URL-friendly name of the data story. The name must only contain alphanumeric characters and hyphens ( [A-Za-z0-9\\-] ).","title":"Required"},{"location":"triplydb-js/account/#optional","text":"accessLevel The access level of the dataset. If none is given the default of 'private' is used. The following values are supported: 'private' The dataset can only be accessed by the Account object for which it is created. 'internal' The dataset can only be accessed by people who are logged into the TriplyDB server. 'public' The dataset can be accessed by everybody. content: StoryElementUpdate[] A list of story elements. The building blocks of the Story. displayName: string The human-readable name of the data story. This name may include spaces and other characters that are not allowed in the URL-friendly name. A story element is an object with the following keys: caption: string The caption is an explanatory text about a specific query. id: string Each Story element gets an Id when it is created. When you want to update a Story element you will need this Id. The Id is only required when updating an element and not needed when adding an object. paragraph: string The Markdown content of a story paragraph. Only allowed when the type is set to 'paragraph' query: Query An instance of class Query . queryVersion: number The version that is used of the specified query. type Either 'paragraph' or 'query' .","title":"Optional"},{"location":"triplydb-js/account/#examples_1","text":"Example 1 - creates a new story that has access level 'private' : const triply = App.get({ token: process.env.TOKEN }) const user = await triply.getUser() const newStory = await user.addStory('name-of-story') Example 2 - creates a new story that has access level 'public' : const triply = App.get({ token: process.env.TOKEN }) const user = await triply.getUser() const newStory = await user.addStory('name-of-story', { accessLevel: 'public', })","title":"Examples"},{"location":"triplydb-js/account/#accountasorganization","text":"Casts the TriplyDB account object to its corresponding organization object. Class Organization is a specialization of class Account . Calling this method on an Organization object does nothing.","title":"Account.asOrganization()"},{"location":"triplydb-js/account/#examples_2","text":"The following snippet retrieves the account named 'Triply' and casts it to an organization: const account = await triply.getAccount('Triply') const organization = account.asOrganization()","title":"Examples"},{"location":"triplydb-js/account/#alternatives","text":"This method is not needed if the organization is directly retrieved with the specialization method App.getOrganization(name: string) . 
The following snippet returns the same result as the above example, but in a more direct way: const organization = await triply.getOrganization('Triply')","title":"Alternatives"},{"location":"triplydb-js/account/#see-also_1","text":"This method returns an organization object. See class Organization for an overview of the methods that can be called on such objects.","title":"See also"},{"location":"triplydb-js/account/#accountasuser","text":"Casts the TriplyDB account object to its corresponding user object. Class User is a specialization of class Account . Calling this method on a User object does nothing.","title":"Account.asUser()"},{"location":"triplydb-js/account/#examples_3","text":"The following snippet retrieves the account that represents the current user, and casts it to a user object: const account = await triply.getAccount() const user = account.asUser()","title":"Examples"},{"location":"triplydb-js/account/#alternatives_1","text":"This method is not needed if the user is directly retrieved with the specialization method App.getUser(name?: string) . The following snippet returns the same result as the above example, but in a more direct way: const user = await triply.getUser()","title":"Alternatives"},{"location":"triplydb-js/account/#see-also_2","text":"This method returns an organization object. See class Organization for an overview of the methods that can be called on such objects.","title":"See also"},{"location":"triplydb-js/account/#accountensuredatasetname-string-metadata-object","text":"Ensures the existence of a dataset with the given name and with the specified metadata if given. Calling this method ensures that the necessary changes (if any) are made in the connected-to TriplyDB server that result in an end state in which a dataset with the given name and metadata exists. This method is useful in practice, because it removes the burden on the programmer to have to write custom code for checking for the existence of a dataset, and conditionally create a new dataset or make metadata changes to an existing dataset. The changes made as a result of calling this method depend on the current state of the connected-to TriplyDB server: If this account does not yet have a dataset with the given name , then the behavior is identical to calling Account.addDataset(name: string, metadata?: object) with the same arguments. 
If this account already has a dataset with the given name and with the same metadata , then this method makes no changes.","title":"Account.ensureDataset(name: string, metadata?: object)"},{"location":"triplydb-js/account/#example_1","text":"const account = await triply.getAccount('Triply') const myDataset = await account.ensureDataset(`my-dataset`, { license: 'PDDL', })","title":"Example"},{"location":"triplydb-js/account/#see-also_3","text":"The meanings of the arguments to this method are identical to those of the Account.addDataset(name: string, metadata?: object) method.","title":"See also"},{"location":"triplydb-js/account/#accountgetdatasetname-string","text":"Returns the dataset with the given name that is published by this account.","title":"Account.getDataset(name: string)"},{"location":"triplydb-js/account/#examples_4","text":"The following snippet prints the name of the Iris dataset that is published by the Triply account: const account = await triply.getAccount('Triply') const dataset = await account.getDataset('iris') console.log((await dataset.getInfo()).name)","title":"Examples"},{"location":"triplydb-js/account/#see-also_4","text":"This method returns a dataset object. See class Dataset for an overview of the methods that can be called on such objects.","title":"See also"},{"location":"triplydb-js/account/#accountgetdatasets","text":"Returns an async iterator over the accessible datasets for the current account.","title":"Account.getDatasets()"},{"location":"triplydb-js/account/#access-restrictions_1","text":"The iterator only includes datasets that are accessible for the current connection with a TriplyDB server: If no API token is configured, the iterator will include all and only public datasets belonging to this account. If an API token is configured, the iterator will include all public and internal datasets belonging to this account, and will include all private datasets belonging to this account if the API token gives read access to the account.","title":"Access restrictions"},{"location":"triplydb-js/account/#examples_5","text":"The following snippet prints the names of all accessible datasets under the Triply account: const account = await triply.getAccount('Triply') for await (const dataset of account.getDatasets()) { console.log((await dataset.getInfo()).name) } The following snippet prints an array of all accessible dataset objects under the Triply account: const account = await triply.getAccount('Triply') console.log(await account.getDatasets().toArray())","title":"Examples"},{"location":"triplydb-js/account/#accountgetinfo","text":"Returns information about this account. Information is returned in a dictionary object. Individual keys can be accessed for specific information values. The information object for accounts includes the following keys: avatarUrl A URL to the account image. accountName The URL-friendly name of the account. name The human-readable display name of the account. description The human-readable description of the account. createdAt The date and time on which the account was created. datasetCount The number of datasets for the account. queryCount The number of queries for the account. storyCount The number of stories for the account. pinnedDatasets An array containing the pinned datasets for the account. pinnedItems An array containing the pinned items (datasets, stories and queries) for the account. type The account type: either organization or user . role The role of the account. orgs An array of organizations of which the account is a member.
Email address The email address of the account. updatedAt The date and time on which the account was last updated. lastActivity The date and time on which the account was last online on TriplyDB.","title":"Account.getInfo()"},{"location":"triplydb-js/account/#examples_6","text":"The following snippet prints the full information object for the account called \u2018Triply\u2019: const account = await triply.getAccount('Triply') console.log(await account.getInfo()) The output for this snippet can look as follows: { 'accountName': 'Triply', 'avatarUrl': 'https://www.gravatar.com/avatar/9bc28997dd1074e405e1c66196d5e117?d=mm', 'createdAt': 'Mon Mar 19 2018 14:39:18 GMT+0000 (Coordinated Universal Time)', 'datasetCount': 16, 'name': 'Triply', 'queryCount': 37, 'storyCount': 7, 'type': 'org', 'updatedAt': 'Tue Nov 27 2018 09:29:38 GMT+0000 (Coordinated Universal Time)' } The following snippet prints the name of the account called \u2018Triply\u2019: const account = await triply.getAccount('Triply') console.log((await account.getInfo()).name)","title":"Examples"},{"location":"triplydb-js/account/#accountgetpinneditems","text":"Returns the list of datasets, stories and queries that are pinned for the current account. A pinned item is an item that is displayed in a prominent way on the account web page.","title":"Account.getPinnedItems()"},{"location":"triplydb-js/account/#order-considerations","text":"The order in which the pinned datasets are returned reflects the order in which they appear on the organization homepage (from top-left to bottom-right).","title":"Order considerations"},{"location":"triplydb-js/account/#examples_7","text":"The following snippet prints the names of the items that are pinned on the Triply account page: const account = await triply.getAccount('Triply') for await (const item of account.getPinnedItems()) { console.log((await item.getInfo()).name) }","title":"Examples"},{"location":"triplydb-js/account/#see-also_5","text":"This method returns various types of objects. Each class has different functionalities: See class Dataset for an overview of the methods for dataset objects. See class Query for an overview of the methods for query objects. See class Story for an overview of the methods for story objects.","title":"See also"},{"location":"triplydb-js/account/#accountgetqueryname-string","text":"Returns the TriplyDB query with the given name .","title":"Account.getQuery(name: string)"},{"location":"triplydb-js/account/#examples_8","text":"The following snippet prints the query string for a query called animal-gallery that belongs to the account called Triply : const account = await triply.getAccount('Triply') const query = await account.getQuery('animal-gallery') console.log((await query.getInfo()).requestConfig?.payload.query)","title":"Examples"},{"location":"triplydb-js/account/#see-also_6","text":"See class Query for an overview of the methods for query objects.","title":"See also"},{"location":"triplydb-js/account/#accountgetqueries","text":"Returns an async iterator over the accessible queries that belong to the account.","title":"Account.getQueries()"},{"location":"triplydb-js/account/#access-restrictions_2","text":"The iterator only includes datasets that are accessible for the current connection with a TriplyDB server: If no API token is configured, the iterator will include all and only public queries belonging to this account. 
If an API token is configured, the iterator will include all public and internal queries that belong to this account, and will include all private queries that belong to this account if the API token gives read access to the account.","title":"Access restrictions"},{"location":"triplydb-js/account/#examples_9","text":"The following snippet prints the names of the queries that belong to the account called Triply : const account = await triply.getAccount('Triply') for await (const query of account.getQueries()) { console.log((await query.getInfo()).name) }","title":"Examples"},{"location":"triplydb-js/account/#see-also_7","text":"See class Query for an overview of the methods for query objects.","title":"See also"},{"location":"triplydb-js/account/#accountensurestoryname-string-metadata-object","text":"Ensures the existence of a story with the given name and with the specified metadata , if given. Calling this method ensures that the necessary changes (if any) are made in the connected-to TriplyDB server that result in an end state in which a story with the given name and metadata exists. This method is useful in practice, because it removes the burden on the programmer to have to write custom code for checking for the existence of a story, and conditionally create a new story or make metadata changes to an existing story. The changes made as a result of calling this method depend on the current state of the connected-to TriplyDB server: If this account does not yet have a story with the given name , then the behavior is identical to calling Account.addStory(name: string, metadata?: object) with the same arguments. If this account already has a story with the given name and with the same metadata , then this method returns that story.","title":"Account.ensureStory(name: string, metadata: object)"},{"location":"triplydb-js/account/#optional_1","text":"displayName Accepts a string value to be used as the display name for the story. accessLevel Accepts either of the following values: 'private' (default), 'internal' , 'public' . content Accepts a list of StoryElementUpdate objects, defined below. Note: If no accessLevel is specified, the default used is 'private'. Examples Example 1: To ensure a Story only requires a name of type string. It's access level will default to private await someUser.ensureStory(`someStoryName`) Example 2: Ensure a Story setting it's accessLevel and displayName . await someUser.ensureStory(`someStoryName`, { accessLevel: 'public', displayName: `This is a Story`, })","title":"Optional"},{"location":"triplydb-js/account/#accountaddstoryname-string-newstoryoptions-object","text":"","title":"Account.addStory(name: string, newStoryOptions?: object)"},{"location":"triplydb-js/account/#required_1","text":"Adds and returns the TriplyDB story with the given name .","title":"Required"},{"location":"triplydb-js/account/#optional_2","text":"The optional new story object that can be passed accepts the following properties: displayName Accepts a string value to be used as a display name for the story accessLevel Sets the access level for the story. Accepts either of the following: 'private' (default), 'internal' , 'public' . If no accesslevel is specified, the default value private is used. 
Examples : Example 1 - creates a newStory that is 'private' const newStory = await someUser.addStory('name-of-story') Example 2 - creates a newStory that is 'public' const newStory = await someUser.addStory('name-of-story', { accessLevel: 'public', })","title":"Optional"},{"location":"triplydb-js/account/#accountgetstoryname-string","text":"Returns the TriplyDB story with the given name .","title":"Account.getStory(name: string)"},{"location":"triplydb-js/account/#examples_10","text":"The following snippet prints the paragraphs in the story called the-iris-dataset that is published under the account called Triply . Stories are sequences of paragraphs and queries. This program prints the paragraphs in the sequence in which they appear in the story. const account = await triply.getAccount('Triply') const story = await account.getStory('the-iris-dataset')","title":"Examples"},{"location":"triplydb-js/account/#see-also_8","text":"See class Story for an overview of the methods for story objects.","title":"See also"},{"location":"triplydb-js/account/#accountgetstories","text":"Returns an iterator with the TriplyDB stories that belong to the account.","title":"Account.getStories()"},{"location":"triplydb-js/account/#examples_11","text":"The following snippet prints the names of the queries that belong to the Triply account: const account = await triply.getAccount('Triply') for await (const story of account.getStories()) { console.log((await story.getInfo()).name) }","title":"Examples"},{"location":"triplydb-js/account/#see-also_9","text":"See class Story for an overview of the methods for story objects.","title":"See also"},{"location":"triplydb-js/account/#accountpinitemsitems-arraydatasetstoryquery","text":"Pins the given datasets, stores, and/or queries to the home page of this account. The pinned elements can be seen by people who visit the account online. They are also included in the account metadata. const triply = App.get({ token: process.env.TOKEN }) const user = await triply.getUser() const query = await user.getQuery('name-of-query') const newStory = await user.getStory('name-of-story') user.pinItems([query,newStory])","title":"Account.pinItems(items: array[Dataset|Story|Query])"},{"location":"triplydb-js/account/#accountsetavatarfile-string","text":"Sets a new image that characterizes this account. A circular version of this image is displayed inside the TriplyDB GUI. This image is also published as part of account metadata.","title":"Account.setAvatar(file: string)"},{"location":"triplydb-js/account/#examples_12","text":"The following snippet uploads the local image in file logo.svg and set it as the characterizing image for the Triply account: const account = await triply.getAccount('Triply') await account.setAvatar('logo.svg')","title":"Examples"},{"location":"triplydb-js/account/#accountupdatemetadata-object","text":"Updates the metadata for this account. To update the metadata profile with information within the metadata itself, we need the following steps: Obtain the relevant piece of information as a variable/const: getObject() Update the metadata profile with the obtained information stored in the variable/const: update() getObject() Define a constant ( const ) and assign it to ctx.store.getObjects() . The arguments for the function will be the subject, predicate, and graph. The function retrieves the object so the other 3 parts of a quad need to be specified. update() Update the relevant part of the metadata profile with the corresponding piece of information. 
.update({}) Example If one wants to update the display name of a metadata profile with the object of the following triple within the metadata: 'Example Name'@en async (ctx) => { // Fetch displayName const displayName = ctx.store .getObjects( 'https://example.org/example', 'https://schema.org/name', graph.metadata ) .find( (node) => node.termType === 'Literal' && node.language === 'en' )?.value; // Specify the environment variable, if necessary const _dataset = process.env['MODE'] === 'Production' ? (await app.triplyDb.getOrganization(organization)).getDataset(dataset) : (await app.triplyDb.getUser()).getDataset(organization + '-' + dataset) // Update the display name if (displayName) await (await _dataset).update({ displayName }) }; The metadata object for accounts can include the following keys: accountName The URL-friendly name of the account. name The human-readable display name of the account description The human-readable description of the account. pinnedItems An array containing the pinned items (datasets, stories and queries) for the account. Email address The email address of the account.","title":"Account.update(metadata: object)"},{"location":"triplydb-js/app/","text":"On this page: App App.getAccount(name?: string) Examples See also App.getAccounts() Example App.getInfo() Examples App.getOrganization(name: string) Examples Alternatives See also App.getUser(name?: string) Examples Alternatives See also App.isCompatibleWith(minimumVersion: string) Arguments See also App \u00b6 Instances of the App class are specific application connections that are set-up with a TriplyDB server. Connections to TriplyDB servers can be created with and without setting an API token. When no API token is set, the connection can be used to perform read-only operations over public data. When an API token is set, the connection can be used to perform read/write operations over public/private data the API token grants access to. The following snippet creates an instance of the App object that establishes read-only access to the TriplyDB server at https://triplydb.com : import App from '@triply/triplydb' const triply = App.get({ url: 'https://api.triplydb.com' }) Notice that the URL must point to the API of the TriplyDB server that the App object connects to. The API URL is typically created by adding the api. subdomain in front of the server's host name. For example, since [1] is the web-based GUI for the TriplyDB server, then [2] is the corresponding API for that instance. [1] https://triplydb.com [2] https://api.triplydb.com When an API token is specified, the operations that can be performed through the App object are determined by: The access level of the token: either \u201cRead access\u201d, \u201cWrite access\u201d, or \u201cManagement access\u201d. The credentials of the user account for which the API token is created. When a user is a member of an organization, she has access to all its datasets, stories, and queries; a user always has access to her own datasets, stores and queries. The following token access levels are available: 1. \u201cRead access\u201d allows: Read operations over data with access level \u201cPublic\u201d. Read operations over data with access level \u201cInternal\u201d. Read operations over data with access level \u201cPrivate\u201d that belongs to the user who created the token. Read operations over data with access level \u201cPrivate\u201d that belongs to organizations to which the user who created the token is a member. 2. 
\u201cWrite access\u201d allows: All operations allows by \u201cRead access\u201d. Write operations over data that has access setting \u201cInternal\u201d. Write operations over data 3. \u201cManagement access\u201d allows the following operations to be performed: creating organizations, adding/removing members to/from organizations. The following creates an App object with an API token that is made available through an environment variable: import App from '@triply/triplydb' const triply = App.get({ token: process.env.TOKEN }) It is typical for one TriplyDB.js script to have exactly one App object. App.getAccount(name?: string) \u00b6 Returns the TriplyDB account with the given name . If name is omitted, the TriplyDB account that is associated with the current API token is returned. Examples \u00b6 The following snippet returns the account called 'Triply' . const account = await triply.getAccount('Triply') The following snippet returns the current account. This is the account for which the currently configured API token was created. const account = await triply.getAccount() See also \u00b6 This method returns an account object. See class Account for an overview of the methods that can be called on such objects. Class Account has two specializations: class Organization and class User . In line with these class specializations, there are also two method specializations: Method App.getOrganization(name: string) returns an organization object. Method App.getUser(name?: string) returns a user object. App.getAccounts() \u00b6 Returns an async iterator over all accounts in the TriplyDB server. Example \u00b6 The following snippet prints the display names for all accounts in the TriplyDB server at https://triplydb.com : const triply = App.get({ url: 'https://api.triplydb.com' }) for await (const account of triply.getAccounts()) { console.log((await account.getInfo()).name) } The following snippet returns an array that contains all account objects: console.log(await triply.getAccounts().toArray()) See class Account for an overview of the methods that can be used with account objects. App.getInfo() \u00b6 Returns information about the TriplyDB server that the App is connected to. Information is returned in a dictionary object. Individual keys can be accessed for specific information values. Examples \u00b6 The following snippet prints the contact email for the TriplyDB server: console.log((await triply.getInfo()).contactEmail) The following snippet returns an object describing the used TriplyDB server: console.log(await triply.getInfo()) App.getOrganization(name: string) \u00b6 Returns the TriplyDB organization with the given name . This method is similar to App.getAccount(name?: string) , but differs in the following ways: This method only works for accounts that represent TriplyDB organizations. This method returns an organization object. Class Organization is a specialization of class Account . Examples \u00b6 The following snippet returns the organization called 'Triply' : const organization = await triply.getOrganization('Triply') See class Organization for an overview of the methods that can be used with organization objects. Alternatives \u00b6 This method is a shorthand for calling the following two methods: Call method App.getAccount(name?: string) to retrieve an account object. Then call method Account.asOrganization() to cast the account object into an organization object. 
The following snippet returns the same result as the previous example, but uses two methods instead of one: const account = await triply.getAccount('Triply') const organization = account.asOrganization() See also \u00b6 This method returns an organization object. See class Organization for an overview of the methods that can be called on such objects. App.getUser(name?: string) \u00b6 Returns the TriplyDB user with the given name . If name is omitted, the TriplyDB user that is associated with the current API token is returned. This only works if an API token is configured for the current App object. Examples \u00b6 The following snippet returns the user with name 'somebody' : const user = await triply.getUser('somebody') The following snippet returns the user for whom the API token was created. This only works if an API token was configured when the App object was created: const me = await triply.getUser() Alternatives \u00b6 This method is a shorthand for the following two methods: Call method App.getAccount() to retrieve an account object. Then call method Account.asUser() to cast the account object into a user object. The following snippet returns the same result as the previous examples, but uses two methods instead of one: const account = await triply.getAccount('somebody') const user = account.asUser() See also \u00b6 This method returns a user object. See class User for an overview of the methods that can be called on such objects. App.isCompatibleWith(minimumVersion: string) \u00b6 Succeeds if and only if the currently connected to TriplyDB server has a version that is identical to or higher than the given minimum version. Arguments \u00b6 Argument minimumVersion must be a string that uses Semantic Versioning. For example '1.2.3' . See also \u00b6 To inspect the current version of the connected-to TriplyDB server, use App.getInfo() .","title":"App"},{"location":"triplydb-js/app/#app","text":"Instances of the App class are specific application connections that are set-up with a TriplyDB server. Connections to TriplyDB servers can be created with and without setting an API token. When no API token is set, the connection can be used to perform read-only operations over public data. When an API token is set, the connection can be used to perform read/write operations over public/private data the API token grants access to. The following snippet creates an instance of the App object that establishes read-only access to the TriplyDB server at https://triplydb.com : import App from '@triply/triplydb' const triply = App.get({ url: 'https://api.triplydb.com' }) Notice that the URL must point to the API of the TriplyDB server that the App object connects to. The API URL is typically created by adding the api. subdomain in front of the server's host name. For example, since [1] is the web-based GUI for the TriplyDB server, then [2] is the corresponding API for that instance. [1] https://triplydb.com [2] https://api.triplydb.com When an API token is specified, the operations that can be performed through the App object are determined by: The access level of the token: either \u201cRead access\u201d, \u201cWrite access\u201d, or \u201cManagement access\u201d. The credentials of the user account for which the API token is created. When a user is a member of an organization, she has access to all its datasets, stories, and queries; a user always has access to her own datasets, stores and queries. The following token access levels are available: 1. 
\u201cRead access\u201d allows: Read operations over data with access level \u201cPublic\u201d. Read operations over data with access level \u201cInternal\u201d. Read operations over data with access level \u201cPrivate\u201d that belongs to the user who created the token. Read operations over data with access level \u201cPrivate\u201d that belongs to organizations to which the user who created the token is a member. 2. \u201cWrite access\u201d allows: All operations allows by \u201cRead access\u201d. Write operations over data that has access setting \u201cInternal\u201d. Write operations over data 3. \u201cManagement access\u201d allows the following operations to be performed: creating organizations, adding/removing members to/from organizations. The following creates an App object with an API token that is made available through an environment variable: import App from '@triply/triplydb' const triply = App.get({ token: process.env.TOKEN }) It is typical for one TriplyDB.js script to have exactly one App object.","title":"App"},{"location":"triplydb-js/app/#appgetaccountname-string","text":"Returns the TriplyDB account with the given name . If name is omitted, the TriplyDB account that is associated with the current API token is returned.","title":"App.getAccount(name?: string)"},{"location":"triplydb-js/app/#examples","text":"The following snippet returns the account called 'Triply' . const account = await triply.getAccount('Triply') The following snippet returns the current account. This is the account for which the currently configured API token was created. const account = await triply.getAccount()","title":"Examples"},{"location":"triplydb-js/app/#see-also","text":"This method returns an account object. See class Account for an overview of the methods that can be called on such objects. Class Account has two specializations: class Organization and class User . In line with these class specializations, there are also two method specializations: Method App.getOrganization(name: string) returns an organization object. Method App.getUser(name?: string) returns a user object.","title":"See also"},{"location":"triplydb-js/app/#appgetaccounts","text":"Returns an async iterator over all accounts in the TriplyDB server.","title":"App.getAccounts()"},{"location":"triplydb-js/app/#example","text":"The following snippet prints the display names for all accounts in the TriplyDB server at https://triplydb.com : const triply = App.get({ url: 'https://api.triplydb.com' }) for await (const account of triply.getAccounts()) { console.log((await account.getInfo()).name) } The following snippet returns an array that contains all account objects: console.log(await triply.getAccounts().toArray()) See class Account for an overview of the methods that can be used with account objects.","title":"Example"},{"location":"triplydb-js/app/#appgetinfo","text":"Returns information about the TriplyDB server that the App is connected to. Information is returned in a dictionary object. Individual keys can be accessed for specific information values.","title":"App.getInfo()"},{"location":"triplydb-js/app/#examples_1","text":"The following snippet prints the contact email for the TriplyDB server: console.log((await triply.getInfo()).contactEmail) The following snippet returns an object describing the used TriplyDB server: console.log(await triply.getInfo())","title":"Examples"},{"location":"triplydb-js/app/#appgetorganizationname-string","text":"Returns the TriplyDB organization with the given name . 
This method is similar to App.getAccount(name?: string) , but differs in the following ways: This method only works for accounts that represent TriplyDB organizations. This method returns an organization object. Class Organization is a specialization of class Account .","title":"App.getOrganization(name: string)"},{"location":"triplydb-js/app/#examples_2","text":"The following snippet returns the organization called 'Triply' : const organization = await triply.getOrganization('Triply') See class Organization for an overview of the methods that can be used with organization objects.","title":"Examples"},{"location":"triplydb-js/app/#alternatives","text":"This method is a shorthand for calling the following two methods: Call method App.getAccount(name?: string) to retrieve an account object. Then call method Account.asOrganization() to cast the account object into an organization object. The following snippet returns the same result as the previous example, but uses two methods instead of one: const account = await triply.getAccount('Triply') const organization = account.asOrganization()","title":"Alternatives"},{"location":"triplydb-js/app/#see-also_1","text":"This method returns an organization object. See class Organization for an overview of the methods that can be called on such objects.","title":"See also"},{"location":"triplydb-js/app/#appgetusername-string","text":"Returns the TriplyDB user with the given name . If name is omitted, the TriplyDB user that is associated with the current API token is returned. This only works if an API token is configured for the current App object.","title":"App.getUser(name?: string)"},{"location":"triplydb-js/app/#examples_3","text":"The following snippet returns the user with name 'somebody' : const user = await triply.getUser('somebody') The following snippet returns the user for whom the API token was created. This only works if an API token was configured when the App object was created: const me = await triply.getUser()","title":"Examples"},{"location":"triplydb-js/app/#alternatives_1","text":"This method is a shorthand for the following two methods: Call method App.getAccount() to retrieve an account object. Then call method Account.asUser() to cast the account object into a user object. The following snippet returns the same result as the previous examples, but uses two methods instead of one: const account = await triply.getAccount('somebody') const user = account.asUser()","title":"Alternatives"},{"location":"triplydb-js/app/#see-also_2","text":"This method returns a user object. See class User for an overview of the methods that can be called on such objects.","title":"See also"},{"location":"triplydb-js/app/#appiscompatiblewithminimumversion-string","text":"Succeeds if and only if the currently connected to TriplyDB server has a version that is identical to or higher than the given minimum version.","title":"App.isCompatibleWith(minimumVersion: string)"},{"location":"triplydb-js/app/#arguments","text":"Argument minimumVersion must be a string that uses Semantic Versioning. 
A valid version string is, for example, '1.2.3' .","title":"Arguments"},{"location":"triplydb-js/app/#see-also_3","text":"To inspect the current version of the connected-to TriplyDB server, use App.getInfo() .","title":"See also"},{"location":"triplydb-js/asset/","text":"On this page: Asset Asset.addVersion(path: File | string) Example Asset.delete() Example Asset.getInfo(version?: number) Examples Asset.getVersionInfo(version: number) Examples Asset.selectVersion(version: number) Example Asset.toFile(path: string, version?: number) Example Asset.toStream(version?: number) Example Asset \u00b6 Not all data can be stored as RDF data. For example, images and video files use a binary format. Such files can also be stored in TriplyDB as Assets and can be integrated into the Knowledge Graph. Each asset has a specific identifier that can be used in the Knowledge Graph. An asset is always uploaded per dataset, using the uploadAsset() function. See Dataset.uploadAsset() for uploading an asset. If the asset has already been created, the following functions can retrieve it from the dataset: Dataset.getAsset(assetName: string, versionNumber?: number) Dataset.getAssets() TriplyDB.js supports several functions to manipulate an asset on TriplyDB. Asset.addVersion(path: File | string) \u00b6 Update an asset with a new version of the document using the addVersion function. The input of this function is a path to the file location that you want to update the asset with. The file you want to add as a new version does not in any way have to correspond to the asset. Example \u00b6 The following snippet uploads the file my-file.pdf as the new version of the asset: const triply = App.get({ token: process.env.TOKEN }) const user = await triply.getAccount() const dataset = await user.getDataset('my-dataset') const asset = await dataset.getAsset('my-asset') await asset.addVersion('my-file.pdf') Asset.delete() \u00b6 To delete an asset with all of its versions, execute the delete() function. Example \u00b6 The following snippet deletes the asset \u2018my-asset\u2019 together with all of its versions: const triply = App.get({ token: process.env.TOKEN }) const user = await triply.getAccount() const dataset = await user.getDataset('my-dataset') const asset = await dataset.getAsset('my-asset') await asset.delete() Asset.getInfo(version?: number) \u00b6 Returns information about this asset. Information is returned in a dictionary object. Individual keys can be accessed for specific information values. Optionally, you can give a version number to retrieve the information of a particular version. The information object for assets includes the following keys: assetName The URL-friendly name of the asset. identifier The hexadecimal identifier of the asset. createdAt The date and time on which the asset was created. url The URL of the asset. versions An array containing all versions of the asset. uploadedAt The date and time on which the asset was uploaded. fileSize The size of the asset in bytes. Examples \u00b6 The following snippet prints the full information object for the asset called \u2018my-asset\u2019: const triply = App.get({ token: process.env.TOKEN }) const user = await triply.getAccount() const dataset = await user.getDataset('my-dataset') const asset = await dataset.getAsset('my-asset') console.log(await asset.getInfo()) Asset.getVersionInfo(version: number) \u00b6 Returns version-specific information about this asset. Information is returned in a dictionary object. 
Individual keys can be accessed for specific information values. The version-specific information object for assets includes the following keys: id The hexadecimal identifier of the asset. fileSize The size of the asset in bytes. url The URL of the asset. uploadedAt The date and time on which the asset was uploaded. Examples \u00b6 The following snippet prints the version information object for the asset called \u2018my-asset\u2019 at version 1 : const triply = App.get({ token: process.env.TOKEN }) const user = await triply.getAccount() const dataset = await user.getDataset('my-dataset') const asset = await dataset.getAsset('my-asset') console.log(await asset.getVersionInfo(1)) Asset.selectVersion(version: number) \u00b6 With the selectVersion() function you can select a specific version of an Asset. Each version corresponds to an iteration of the file that is added as an asset. The argument of the selectVersion() function is the number of the version you want to retrieve. Example \u00b6 To select the first version of the asset, use selectVersion() with the argument 1 . const triply = App.get({ token: process.env.TOKEN }) const user = await triply.getAccount() const dataset = await user.getDataset('my-dataset') const asset = await dataset.getAsset('my-asset') const versionedAsset = asset.selectVersion(1) Asset.toFile(path: string, version?: number) \u00b6 The binary representation of an asset can be retrieved and written to file via the asset.toFile() function. This function takes as input a string path to the download location and optionally a version number. Example \u00b6 To download the latest version of the my-asset asset to the file my-file-location.txt : const triply = App.get({ token: process.env.TOKEN }) const user = await triply.getAccount() const dataset = await user.getDataset('my-dataset') const asset = await dataset.getAsset('my-asset') await asset.toFile('my-file-location.txt') Asset.toStream(version?: number) \u00b6 If, instead of downloading the asset to a file for later usage, you want to use the asset directly, the toStream() functionality is available. This downloads the asset as a stream for use in a script. toStream() takes an optional version number as argument. Example \u00b6 To get the latest version of the my-asset asset as a stream: const triply = App.get({ token: process.env.TOKEN }) const user = await triply.getAccount() const dataset = await user.getDataset('my-dataset') const asset = await dataset.getAsset('my-asset') asset.toStream()","title":"Asset"},{"location":"triplydb-js/asset/#asset","text":"Not all data can be stored as RDF data. For example, images and video files use a binary format. Such files can also be stored in TriplyDB as Assets and can be integrated into the Knowledge Graph. Each asset has a specific identifier that can be used in the Knowledge Graph. An asset is always uploaded per dataset, using the uploadAsset() function. See Dataset.uploadAsset() for uploading an asset. If the asset has already been created, the following functions can retrieve it from the dataset: Dataset.getAsset(assetName: string, versionNumber?: number) Dataset.getAssets() TriplyDB.js supports several functions to manipulate an asset on TriplyDB.","title":"Asset"},{"location":"triplydb-js/asset/#assetaddversionpath-file-string","text":"Update an asset with a new version of the document using the addVersion function. The input of this function is a path to the file location that you want to update the asset with. 
The file you want to add as a new version does not in any way have to correspond to the asset.","title":"Asset.addVersion(path: File | string)"},{"location":"triplydb-js/asset/#example","text":"The following snippet uploads the file my-file.pdf as the new version of the asset: const triply = App.get({ token: process.env.TOKEN }) const user = await triply.getAccount() const dataset = await user.getDataset('my-dataset') const asset = await dataset.getAsset('my-asset') await asset.addVersion('my-file.pdf')","title":"Example"},{"location":"triplydb-js/asset/#assetdelete","text":"To delete an asset with all of its versions, execute the delete() function.","title":"Asset.delete()"},{"location":"triplydb-js/asset/#example_1","text":"The following snippet deletes the asset \u2018my-asset\u2019 together with all of its versions: const triply = App.get({ token: process.env.TOKEN }) const user = await triply.getAccount() const dataset = await user.getDataset('my-dataset') const asset = await dataset.getAsset('my-asset') await asset.delete()","title":"Example"},{"location":"triplydb-js/asset/#assetgetinfoversion-number","text":"Returns information about this asset. Information is returned in a dictionary object. Individual keys can be accessed for specific information values. Optionally, you can give a version number to retrieve the information of a particular version. The information object for assets includes the following keys: assetName The URL-friendly name of the asset. identifier The hexadecimal identifier of the asset. createdAt The date and time on which the asset was created. url The URL of the asset. versions An array containing all versions of the asset. uploadedAt The date and time on which the asset was uploaded. fileSize The size of the asset in bytes.","title":"Asset.getInfo(version?: number)"},{"location":"triplydb-js/asset/#examples","text":"The following snippet prints the full information object for the asset called \u2018my-asset\u2019: const triply = App.get({ token: process.env.TOKEN }) const user = await triply.getAccount() const dataset = await user.getDataset('my-dataset') const asset = await dataset.getAsset('my-asset') console.log(await asset.getInfo())","title":"Examples"},{"location":"triplydb-js/asset/#assetgetversioninfoversion-number","text":"Returns version-specific information about this asset. Information is returned in a dictionary object. Individual keys can be accessed for specific information values. The version-specific information object for assets includes the following keys: id The hexadecimal identifier of the asset. fileSize The size of the asset in bytes. url The URL of the asset. uploadedAt The date and time on which the asset was uploaded.","title":"Asset.getVersionInfo(version: number)"},{"location":"triplydb-js/asset/#examples_1","text":"The following snippet prints the version information object for the asset called \u2018my-asset\u2019 at version 1 : const triply = App.get({ token: process.env.TOKEN }) const user = await triply.getAccount() const dataset = await user.getDataset('my-dataset') const asset = await dataset.getAsset('my-asset') console.log(await asset.getVersionInfo(1))","title":"Examples"},{"location":"triplydb-js/asset/#assetselectversionversion-number","text":"With the selectVersion() function you can select a specific version of an Asset. Each version corresponds to an iteration of the file that is added as an asset. 
The argument of the selectVersion() function is the number of the version you want to retrieve.","title":"Asset.selectVersion(version: number)"},{"location":"triplydb-js/asset/#example_2","text":"To select the first version of the asset, use selectVersion() with the argument 1 . const triply = App.get({ token: process.env.TOKEN }) const user = await triply.getAccount() const dataset = await user.getDataset('my-dataset') const asset = await dataset.getAsset('my-asset') const versionedAsset = asset.selectVersion(1)","title":"Example"},{"location":"triplydb-js/asset/#assettofilepath-string-version-number","text":"The binary representation of an asset can be retrieved and written to file via the asset.toFile() function. This function takes as input a string path to the download location and optionally a version number.","title":"Asset.toFile(path: string, version?: number)"},{"location":"triplydb-js/asset/#example_3","text":"To download the latest version of the my-asset asset to the file my-file-location.txt : const triply = App.get({ token: process.env.TOKEN }) const user = await triply.getAccount() const dataset = await user.getDataset('my-dataset') const asset = await dataset.getAsset('my-asset') await asset.toFile('my-file-location.txt')","title":"Example"},{"location":"triplydb-js/asset/#assettostreamversion-number","text":"If, instead of downloading the asset to a file for later usage, you want to use the asset directly, the toStream() functionality is available. This downloads the asset as a stream for use in a script. toStream() takes an optional version number as argument.","title":"Asset.toStream(version?: number)"},{"location":"triplydb-js/asset/#example_4","text":"To get the latest version of the my-asset asset as a stream: const triply = App.get({ token: process.env.TOKEN }) const user = await triply.getAccount() const dataset = await user.getDataset('my-dataset') const asset = await dataset.getAsset('my-asset') asset.toStream()","title":"Example"},{"location":"triplydb-js/dataset/","text":"On this page: Dataset Dataset.addPrefixes(prefixes: object) Examples Dataset.ensureService(name: string, metadata?: object) Required Optional: metadata Dataset.addService(name: string, metadata?: object) Arguments Required Optional Examples See also Dataset.clear(...resourceType: string) Arguments Examples Dataset.copy(account: string, dataset: string) Examples Dataset.delete() Examples See also Dataset.deleteGraph(name: string) Examples Dataset.describe(iri: string|NamedNode) Examples Dataset.getAsset(name: string, version?: number) Examples Dataset.getAssets() Examples Dataset.getGraph(name: string) Examples Dataset.getGraphs() Examples Dataset.getInfo() Examples Dataset.getPrefixes() Examples Dataset.getService(name: string) Examples Dataset.getServices() Examples Dataset.getStatements({subject?: string, predicate?: string, object?: string, graph?: string}) Arguments Example Get the data locally Dataset.graphsToFile(destinationPath: string, arguments?: object) Optional Examples Dataset.graphsToStore(graph?: Graph) Optional Examples Dataset.graphsToStream(type: 'compressed' | 'rdf-js', arguments?: object) Optional Examples Dataset.importFromDataset(fromDataset: Dataset, arguments?: object) Required Optional Examples Dataset.importFromFiles(files: list(string || File), defaultsConfig?: object) Required Supported files Examples Dataset.importFromStore(store: n3.Store, defaultsConfig?: object) Examples Dataset.importFromUrls(urls: list(string), defaultsConfig?: object) Required Examples 
Dataset.removeAllGraphs() Examples Dataset.removePrefixes(prefixes: string[]) Examples Dataset.renameGraph(from: string, to: string) Examples Dataset.update(metadata: object) Arguments Example Dataset.uploadAsset( filePath: string, opts?: {mode?: 'throw-if-exists'| 'replace-if-exists'| 'append-version', name?: string}) User cases Examples Dataset \u00b6 The Dataset class represents a TriplyDB dataset. Dataset.addPrefixes(prefixes: object) \u00b6 Adds IRI prefix declarations to the dataset. The prefixes argument is a dictionary object whose keys are aliases and whose values are IRI prefixes. Examples \u00b6 The following snippet adds prefix declarations for aliases id and def to the Iris dataset: const organization = await triply.getOrganization('Triply') const dataset = await organization.getDataset(iris) await dataset.addPrefixes({ def: 'https://triplydb.com/Triply/iris/def/', id: 'https://triplydb.com/Triply/iris/id/', }) Dataset.ensureService(name: string, metadata?: object) \u00b6 Ensures the existence of a service with the given name and with the specified metadata if given. Calling this method ensures that the necessary changes (if any) are made in the connected-to TriplyDB server that result in an end state in which a service with the given name and metadata exists. This method is useful in practice, because it removes the burden on the programmer to have to write custom code for checking for the existence of a service, and conditionally create a new service or make metadata changes to an existing service. The changes made as a result of calling this method depend on the current state of the connected-to TriplyDB server: If this dataset does not yet have a service with the given name , then the behavior is identical to calling Dataset.addService(name: string, metadata?: object) with the same arguments. If this dataset already has a service with the given name , but with different metadata specified for it, then the behavior is identical to calling Account.getDataset(name: string) and Dataset.update(metadata: object) . If this dataset already has a service with the given name and with the same metadata , then this method returns that service. Required \u00b6 name Accepts a string value which is the name of the service to ensure. Optional: metadata \u00b6 serviceMetadata = { type: 'elasticsearch' | 'virtuoso' | 'jena' ; config?: { reasoner?: 'OWL' | 'RDFS' | 'None'; }; }; type Accepts a string value of one of the following: 'virtuoso' , 'elasticsearch' , 'jena' . config Config is an optional property. It accepts an object with a reasoner property. reasoner The reasoner property accepts a string value of either 'OWL' , 'RDFS' , or 'None' . Note: If no options are specified the default service is of type: virtuoso . Note that the config.reasoner will only accept a value when type is: 'jena' Examples Example 1: Ensure a service with no arguments. If not found it's type defaults to virtuoso . await someDataset.ensureService('someServiceName') Example 2: Ensure a service of type jena . await someDataset.ensureService('someServiceName', { type: 'jena' }) Dataset.addService(name: string, metadata?: object) \u00b6 Creates a new service for this dataset. Arguments \u00b6 Required \u00b6 name The URL-friendly name of the service. The name must only contain alphanumeric characters and hyphens (`[A-Za-z0-9\\-]`). Optional \u00b6 The service type is specified with the type parameter. If no type is given, a default of 'virtuoso' is used. 
It supports the following values: 'virtuoso' Starts a SPARQL service. A SPARQL 1.1 compliant service is very scalable and performance, but without advanced reasoning capabilities. 'jena' Starts a SPARQL JENA service. A SPARQL 1.1 compliant service that is less scalable and less performant, but allows reasoning (RDFS or OWL) to be enabled. 'elasticSearch' Starts an Elasticsearch service. A text search engine that can be used to power a search bar or similar textual search API. The name argument can be used to distinguish between different endpoints over the same dataset that are used for different tasks. Examples \u00b6 The following snippet starts two SPARQL endpoints over a specific dataset. One endpoint will be used in the acceptance environment while the other endpoint will be used in the production system. const account = await triply.getAccount() const dataset = await account.getDataset('my-dataset') const acceptance = await dataset.addService('acceptance') const production = await dataset.addService('production', { type: 'elasticsearch', }) const reasoning = await dataset.addService('reasoning', { type: 'jena', config: { reasoner: 'OWL' }, }) See also \u00b6 See class Service for an overview of the methods that can be used with service objects. Dataset.clear(...resourceType: string) \u00b6 Removes one or more resource types from the current dataset. Arguments \u00b6 The resources are specified by the rest parameter resourceType , which supports the following values : 'assets' Removes all assets in the dataset. 'graphs' Removes all graphs in the dataset. 'services' Removes all services in the dataset. Examples \u00b6 The following example code removes all graphs and services for a specific dataset: const account = await triply.getAccount() const dataset = await account.getDataset('my-dataset') await dataset.clear('graphs', 'services') Dataset.copy(account: string, dataset: string) \u00b6 Creates a copy of the current dataset. The owner (user or organization) of the copy is specified with parameter account . The name of the copy is specified with parameter dataset . This operation does not overwrite existing datasets: if the copied-to dataset already exists, a new dataset with suffix -1 will be created. Examples \u00b6 const account = await triply.getAccount() const dataset = await account.getDataset('my-dataset') console.log(await dataset.copy('account name', 'copy dataset name')) Dataset.delete() \u00b6 Deletes the dataset. This includes deleting the dataset metadata, all of its graphs, all of its services, and all of its assets. Examples \u00b6 The following snippet deletes a specific dataset that is part of the account associated with the current API token: const account = await triply.getAccount() const dataset = await account.getDataset('my-dataset') await dataset.delete() See also \u00b6 Sometimes it is more useful to only delete the graphs that belong to a dataset, but leave the dataset metadata, services, and assets in place. The following methods can be used for this purpose: Dataset.deleteGraph(graphName: string) Dataset.removeAllGraphs() Dataset.deleteGraph(name: string) \u00b6 Deletes the graph with the given name from this dataset. Graph names are IRIs. 
Examples \u00b6 The following snippet deletes a specific graph from a specified dataset: const account = await triply.getAccount() const dataset = await account.getDataset('my-dataset') await dataset.deleteGraph('https://example.org/some-graph') Dataset.describe(iri: string|NamedNode) \u00b6 Each dataset is a collection of triples that describe objects in linked data. Each object is defined with an IRI, an identifier for that object. An object often has incoming and outgoing connections. The Dataset.describe() call can retrieve the incoming and outgoing triples per object. The function returns for a given iri a list of quads where the iri is either in the subject or the object position. Examples \u00b6 The following snippet returns all triples that have https://example.org/id/some-instance in the subject or the object position: const user = await triply.getUser() const dataset = await user.getDataset('my-dataset') console.log(await dataset.describe('https://example.org/id/some-instance')) Dataset.getAsset(name: string, version?: number) \u00b6 Returns the asset with the given name for this dataset. Optionally allows the version number ( version ) of the asset to be specified. If the version number is absent, the latest version of the asset with the given name is returned. Examples \u00b6 The following snippet returns the original version of an image of a dog from the animals dataset: const user = await triply.getUser() const dataset = await user.getDataset('my-dataset') const asset = await dataset.getAsset('file.png', 1) Dataset.getAssets() \u00b6 Returns an async iterator over the assets that belong to this dataset. Assets are binary files that are stored together with data graphs. Common examples include documents, images and videos. Examples \u00b6 The following snippet prints the assets for a specific dataset: const user = await triply.getUser() const dataset = await user.getDataset('my-dataset') for await (const asset of dataset.getAssets()) { console.log(asset) } The following snippet prints the list of assets for a specific dataset: const account = await triply.getAccount() const dataset = await account.getDataset('my-dataset') console.log(await dataset.getAssets().toArray()) Dataset.getGraph(name: string) \u00b6 Each dataset with data consists of one or more named graphs. All graphs together are thus the collection of triples of the dataset. Often a graph is used to denote a part of the dataset, for example the data model of the dataset or the metadata of the dataset. Instead of searching over the complete dataset, you can scope a search to a certain graph by using the getGraph() function. Dataset.getGraph(name: string) returns the graph with the given name that belongs to this dataset. The name is the string representation of the graph IRI. Dataset.getGraph returns a graph object. Examples \u00b6 The following snippet returns the graph about cats from the dataset about animals: const user = await triply.getUser() const dataset = await user.getDataset('animals') const graph = dataset.getGraph('https://example.com/cats') Dataset.getGraphs() \u00b6 Returns an async iterator over graphs that belong to this dataset. Examples \u00b6 The following snippet retrieves the graphs for a specific dataset: const account = await triply.getAccount() const dataset = await account.getDataset('my-dataset') console.log(await dataset.getGraphs().toArray()) Dataset.getInfo() \u00b6 Returns information about this dataset. Information is returned in a dictionary object. 
Individual keys can be accessed for specific information values. Examples \u00b6 The following snippet prints the information from the specified dataset of the current user: const user = await triply.getUser() const dataset = await user.getDataset('my-dataset') console.log(await dataset.getInfo()) Dataset.getPrefixes() \u00b6 Returns the prefixes that are defined for this dataset. This contains prefix declarations that are generic and configured for this TriplyDB server, and prefix declarations that are defined for this specific dataset. Examples \u00b6 The following snippet prints the prefix declarations that hold for my-dataset : const user = await triply.getUser() const dataset = await user.getDataset('my-dataset') for await (const prefix of dataset.getPrefixes()) { console.log(prefix) } Dataset.getService(name: string) \u00b6 Returns the service with the given name for this dataset. Examples \u00b6 The following snippet retrieves the acceptance service for the product catalog of an imaginary company: const organization = await triply.getOrganization('some-company') const dataset = await organization.getDataset('product-catalog') const service = await dataset.getService('acceptance') Dataset.getServices() \u00b6 Returns an async iterator over TriplyDB services under a dataset. See class Service for an overview of the methods for service objects. Examples \u00b6 The following snippet emits the services that are enabled for a specific dataset: const account = await triply.getAccount() const dataset = await account.getDataset('my-dataset') for await (const service of dataset.getServices()) { console.log(service) } If you do not want to iterate over the services with an async iterator, but instead want to get an array of services, use the .toArray() call instead: const account = await triply.getAccount() const dataset = await account.getDataset('my-dataset') console.log(await dataset.getServices().toArray()) Dataset.getStatements({subject?: string, predicate?: string, object?: string, graph?: string}) \u00b6 Returns an async iterator with statements (quadruples) that fit the specified pattern. Arguments \u00b6 subject , if specified, is the subject term that should be matched. predicate , if specified, is the predicate term that should be matched. object , if specified, is the object term that should be matched. graph , if specified, is the graph name that should be matched. Example \u00b6 The following prints all statements in the dataset: const user = await triply.getUser() const dataset = await user.getDataset('my-dataset') for await (const statement of dataset.getStatements()) { console.log(statement) } The following prints the description of the Amsterdam resource in the DBpedia dataset: const association = await triply.getOrganization('DBpedia-association') const dbpedia = await association.getDataset('dbpedia') for await (const statement of dbpedia.getStatements({subject: 'http://dbpedia.org/resource/Amsterdam'})) { console.log(statement) } Get the data locally \u00b6 Most of the time you do not need to download the entire dataset locally, as TriplyDB supports a variety of methods to use linked data directly. But if you want to use the entire graph locally, that is possible with TriplyDB.js . There are three methods to retrieve linked data from TriplyDB. graphsToFile() , graphsToStore() and graphsToStream() . Dataset.graphsToFile(destinationPath: string, arguments?: object) \u00b6 The first method downloads the linked data graphs directly and writes the data to the location of the destinationPath . 
The extension on the destinationPath defines the linked data type that is downloaded. The extensions that are supported are: nt , nq , trig , ttl , jsonld , json . If no extension is set or the extension is not recognized the function will throw an error. Optional \u00b6 The optional properties accepted as arguments for graphsToFile Compressed Argument compressed optionally is a boolean defining if a graph is compressed with GNU zip (gzip) compression algorithm and will end with a `.gz` extension. Graph Argument Graph optionally is an specific graph that you want to write to file. These graph is an instance of a 'Graph' class Examples \u00b6 The following example downloads the dataset to file: const triply = App.get({ token: process.env.TOKEN }) const user = await triply.getAccount() const dataset = await user.getDataset('pokemon') await dataset.graphsToFile('my-filename.ttl', {compressed: true}) Dataset.graphsToStore(graph?: Graph) \u00b6 The second method is to download the file into a N3.store . The n3 library is one of the most complete libraries for handling linked data in memory. The N3.js library is an implementation of the RDF.js low-level specification that lets you handle RDF in JavaScript easily, with an asynchronous, streaming approach. To reduce the overhead of downloading your data to file and then insert it in the N3 Store. TriplyDB.js has a graphsToStore() where a N3 store is returned as a result of the graphsToStore() function. Optional \u00b6 The optional argument for graphsToStore is Graph . With Graph you can optionally define a specific graph that you want to write to file. These graph is an instance of a 'Graph' class. Examples \u00b6 The following example downloads the dataset as N3.store : const triply = App.get({ token: process.env.TOKEN }) const user = await triply.getAccount() const dataset = await user.getDataset('pokemon') const store = await dataset.graphsToStore() Dataset.graphsToStream(type: 'compressed' | 'rdf-js', arguments?: object) \u00b6 The final method to download linked data to a local source is the graphsToStream this function returns a stream of quads that can directly be iterated over. The Stream is either of the type compressed which returns a gzipped stream of linked data, or type rdf-js which returns a stream of quads parsed according to the rdf-js standard . Optional \u00b6 The following arguments can be defined in the optional arguments object. Extension Argument Extension optionally defines the linked data type that is streamed. The extensions that are supported are: `nt`, `nq`, `trig`, `ttl`, `jsonld`, `json`. Graph Argument Graph optionally is an specific graph that you want to write to file. This graph is an instance of a 'Graph' class Examples \u00b6 The following example streams through the dataset as rdf-js quad objects and prints the quad to the screen. Notice that the stream is an async iterator. Example 1 const triply = App.get({ token: process.env.TOKEN }) const user = await triply.getAccount() const dataset = await user.getDataset('pokemon') const stream = await dataset.graphsToStream('rdf-js', {extension: '.nq'}) for await(const quad of stream){ console.log(quad) } The following example streams through the dataset as chunks of ttl. and prints the buffer to the screen. 
Example 2 const triply = App.get({ token: process.env.TOKEN }) const user = await triply.getAccount() const dataset = await user.getDataset('pokemon') const stream = await dataset.graphsToStream('compressed', {extension: '.ttl'}) for await(const quad of stream.pipe(zlib.createGunzip())){ console.log((quad as Buffer).toString()) } Dataset.importFromDataset(fromDataset: Dataset, arguments?: object) \u00b6 Imports one or more named graphs from a different dataset into this dataset. Data reuse is an important principle in linked data. This functionality makes it very easy to pull in vocabularies and datasets from other places. Changes in the fromDataset dataset are not automatically reflected in this dataset. If you want to synchronize with changes made in the imported-from dataset, the graphs must be explicitly imported. This protects this dataset against unanticipated changes in the imported-from dataset, while still being able to stay in sync with the imported-from dataset if this is explicitly requested. Required \u00b6 Argument fromDataset is the dataset object from which one or more graphs are imported over to this dataset. Optional \u00b6 The optional properties accepted as arguments for importFromDataset graphMap Argument ` graphMap ` optionally is an object with keys and values that implements a mapping from existing graph names (keys) to newly created graph names (values). Each key must be an existing graph name in the `from` dataset. Each value must be the corresponding graph name in this dataset. If this argument is not specified, then graph names in the `from` dataset are identical to graph names in this dataset. Note that either graphNames or graphMap can be given as optional argument and not both. graphNames Argument ` graphNames ` optionally is an array of graph names. These names can be one of three types: 'string', instances of a 'Graph' class, or instances of 'NamedNodes'. Note that either graphNames or graphMap can be given as optional argument and not both. overwrite Accepts a Boolean value. An optional property that determines whether existing graph names in this dataset are allowed to be silently overwritten. If this argument is not specified, then `false` is used as the default value. Examples \u00b6 The following snippet creates a new dataset ( newDataset ) and imports one graph from an existing dataset ( existingDataset ). Notice that the graph can be renamed as part of the import. Example 1 Imports the complete 'existingDataset' dataset to the 'newDataset' . const account = await triply.getAccount() const existingDataset = await account.getDataset('existingDataset') const newDataset = await account.addDataset('newDataset') await newDataset.importFromDataset(existingDataset) Example 2 Imports 'anotherDataset' dataset to a 'newDataset' Where a graph from the existing dataset is renamed to the a graphname in the new dataset. Only the graphs from the graphMap are imported. const account = await triply.getAccount() const anotherDataset = await account.getDataset('anotherDataset') const newDataset = await account.addDataset('newDataset') await newDataset.importFromDataset(anotherDataset, { graphMap: { 'https://example.org/existingDataset/graph': 'https://example.org/newDataset/graph'} }) Example 3 Import 'oneMoreDataset' dataset to the 'newDataset' Where a graph specific graph from the existing dataset is added to the new dataset. If the graph name already occurs in the 'newDataset' it will get overwritten. 
const account = await triply.getAccount() const oneMoreDataset = await account.getDataset('oneMoreDataset') const newDataset = await account.addDataset('newDataset') await newDataset.importFromDataset(oneMoreDataset, { graphNames: ['https://example.org/existingDataset/graph'], overwrite: true, }) Dataset.importFromFiles(files: list(string || File), defaultsConfig?: object) \u00b6 Required \u00b6 Imports one or more files into this dataset. The files must contain RDF data. Optional: defaultsConfig: object defaultGraphName Accepts a string value that is set as the default graph name for each imported file baseIRI Accepts a string value that is set as the default baseIRI for each imported file overwriteAll Accepts a boolean value that overwrites previously added graph names or baseIRIs (regardless of whether they came from a URL or a file) mergeGraphs Accepts a Boolean value. An optional property that determines whether existing graph in this dataset are merged with the imported graphs. If this argument is not specified, then `false` is used as the default value. Supported files \u00b6 The files must contain RDF data and must be encoded in one of the following standardized RDF serialization formats: N-Quads, N-Triples, TriG, Turtle. Examples \u00b6 Example 1 const account = await triply.getAccount('Triply') const dataset = await account.getDataset(iris) await dataset.importFromFiles('test.nt') await dataset.importFromFiles(['file.nq', 'file.tar.gz']) Example 2 const account = await triply.getAccount('Triply') const dataset = await account.getDataset(iris) await dataset.importFromFiles('test.nt') await dataset.importFromFiles(['file.nq', 'file.tar.gz'], { defaultGraphName: 'https://triplydb.com/Triply/example/graph/default', overwriteAll: true, }) Dataset.importFromStore(store: n3.Store, defaultsConfig?: object) \u00b6 One of the most complete libraries for handling linked data in memory is the n3 library . The N3.js library is an implementation of the RDF.js low-level specification that lets you handle RDF in JavaScript easily, with an asynchronous, streaming approach. To reduce the overhead of converting your data from the N3 Store to a file and uploading to TriplyDB. TriplyDB.js has a importFromStore() where a N3 store is given as first argument and uploaded directly to triplyDB. Examples \u00b6 const store = new Store() store.addQuad(DataFactory.namedNode('https://triplydb.com/id/me'),DataFactory.namedNode('http://www.w3.org/2000/01/rdf-schema#label'),DataFactory.literal('me'),DataFactory.namedNode('https://triplydb.com/Triply/example/graph/default')) const triply = App.get({ token: process.env.TOKEN }) const user = await triply.getUser() const dataset = (await user.getDatasets().toArray())[0] dataset.importFromStore(store) Dataset.importFromUrls(urls: list(string), defaultsConfig?: object) \u00b6 Required \u00b6 Imports one or more URLs into this dataset. The URLs must provide access to RDF data. Optional: defaultsConfig: object defaultGraphName Accepts a string value that is set as the default graph name for each imported URL baseIRI Accepts a string value that is set as the default baseIRI for each imported URL overwriteAll Accepts a boolean value that overwrites previously added graph names or baseIRIs (regardless of whether they came from a URL or a file) Examples \u00b6 dataset1.importFromUrls(['url1', 'url2', 'url3']) Dataset.removeAllGraphs() \u00b6 Removes all graphs from this dataset. 
Examples \u00b6 The following snippet removed all graphs from a specific dataset: const user = await triply.getUser() const dataset = await user.getDataset('my-dataset') await dataset.removeAllGraphs() Dataset.removePrefixes(prefixes: string[]) \u00b6 Removes IRI prefixes from this dataset. The prefixes argument is a string array, containing the prefix labels to be removed. Examples \u00b6 The following snippet removes the def and id prefixes from the specified dataset. const account = await triply.getAccount() const dataset = await account.getDataset('my-dataset') await dataset.removePrefixes(['def', 'id']) Dataset.renameGraph(from: string, to: string) \u00b6 Renames a graph of this dataset, where from is the current graph name and to is the new graph name. The string arguments for from and to must be valid IRIs. Examples \u00b6 The following snippet renames a specific graph of a specific dataset: const account = await triply.getAccount() const dataset = await account.getDataset('my-dataset') await dataset.renameGraph( 'https://example.org/old-graph', 'https://example.org/new-graph' ) Dataset.update(metadata: object) \u00b6 Updates the metadata for this dataset. Arguments \u00b6 The metadata argument takes a dictionary object with the following optional keys: Required: accessLevel The access level of the dataset. The following values are supported: 'private' The dataset can only be accessed by the Account object for which it is created. 'internal' The dataset can only be accessed by people who are logged into the TriplyDB server. 'public' The dataset can be accessed by everybody. Optional: description The description of the dataset. This description can make use of Markdown. displayName The human-readable name of the dataset. This name may contain spaces and other characters that are not allowed in the URL-friendly name. license The license of the dataset. The following license strings are currently supported: 'CC-BY-SA' 'CC0 1.0' 'GFDL' 'ODC-By' 'ODC-ODbL' 'PDDL' Example \u00b6 The following snippet updates the dataset's access level, description, display name and license: const account = await triply.getAccount() const dataset = await account.getDataset('my-dataset') dataset.update({ accessLevel: 'private', description: 'desc', displayName: 'disp', license: 'PDDL', }) Dataset.uploadAsset( filePath: string, opts?: {mode?: 'throw-if-exists'| 'replace-if-exists'| 'append-version', name?: string}) \u00b6 Uploads a file that does not contain RDF data as an asset. You can specify the name on the asset and what to do if the asset already exists (throws an error by default). User cases \u00b6 There are several use cases for assets: Source data that will be used as input files to an ETL process. Documentation files that describe the dataset. Media files (audio/image/video) that are described in the RDF graph. Examples \u00b6 The following snippet uploads a source CSV data file and a PDF documentation file: const account = await triply.getAccount() const dataset = await account.getDataset('my-dataset') await dataset.uploadAsset('my-source-data', {name: 'source.csv.gz'}) await dataset.uploadAsset('my-documentation', {name: 'documentation.pdf'}) await dataset.uploadAsset('my-documentation', {mode:'append-version', name: 'documentation.pdf'})","title":"Dataset"},{"location":"triplydb-js/dataset/#dataset","text":"The Dataset class represents a TriplyDB dataset.","title":"Dataset"},{"location":"triplydb-js/dataset/#datasetaddprefixesprefixes-object","text":"Adds IRI prefix declarations to the dataset. 
The prefixes argument is a dictionary object whose keys are aliases and whose values are IRI prefixes.","title":"Dataset.addPrefixes(prefixes: object)"},{"location":"triplydb-js/dataset/#examples","text":"The following snippet adds prefix declarations for aliases id and def to the Iris dataset: const organization = await triply.getOrganization('Triply') const dataset = await organization.getDataset(iris) await dataset.addPrefixes({ def: 'https://triplydb.com/Triply/iris/def/', id: 'https://triplydb.com/Triply/iris/id/', })","title":"Examples"},{"location":"triplydb-js/dataset/#datasetensureservicename-string-metadata-object","text":"Ensures the existence of a service with the given name and with the specified metadata if given. Calling this method ensures that the necessary changes (if any) are made in the connected-to TriplyDB server that result in an end state in which a service with the given name and metadata exists. This method is useful in practice, because it removes the burden on the programmer to have to write custom code for checking for the existence of a service, and conditionally create a new service or make metadata changes to an existing service. The changes made as a result of calling this method depend on the current state of the connected-to TriplyDB server: If this dataset does not yet have a service with the given name , then the behavior is identical to calling Dataset.addService(name: string, metadata?: object) with the same arguments. If this dataset already has a service with the given name , but with different metadata specified for it, then the behavior is identical to calling Account.getDataset(name: string) and Dataset.update(metadata: object) . If this dataset already has a service with the given name and with the same metadata , then this method returns that service.","title":"Dataset.ensureService(name: string, metadata?: object)"},{"location":"triplydb-js/dataset/#required","text":"name Accepts a string value which is the name of the service to ensure.","title":"Required"},{"location":"triplydb-js/dataset/#optional-metadata","text":"serviceMetadata = { type: 'elasticsearch' | 'virtuoso' | 'jena' ; config?: { reasoner?: 'OWL' | 'RDFS' | 'None'; }; }; type Accepts a string value of one of the following: 'virtuoso' , 'elasticsearch' , 'jena' . config Config is an optional property. It accepts an object with a reasoner property. reasoner The reasoner property accepts a string value of either 'OWL' , 'RDFS' , or 'None' . Note: If no options are specified the default service is of type: virtuoso . Note that the config.reasoner will only accept a value when type is: 'jena' Examples Example 1: Ensure a service with no arguments. If not found it's type defaults to virtuoso . await someDataset.ensureService('someServiceName') Example 2: Ensure a service of type jena . await someDataset.ensureService('someServiceName', { type: 'jena' })","title":"Optional: metadata"},{"location":"triplydb-js/dataset/#datasetaddservicename-string-metadata-object","text":"Creates a new service for this dataset.","title":"Dataset.addService(name: string, metadata?: object)"},{"location":"triplydb-js/dataset/#arguments","text":"","title":"Arguments"},{"location":"triplydb-js/dataset/#required_1","text":"name The URL-friendly name of the service. The name must only contain alphanumeric characters and hyphens (`[A-Za-z0-9\\-]`).","title":"Required"},{"location":"triplydb-js/dataset/#optional","text":"The service type is specified with the type parameter. 
If no type is given, a default of 'virtuoso' is used. It supports the following values: 'virtuoso' Starts a SPARQL service. A SPARQL 1.1 compliant service is very scalable and performance, but without advanced reasoning capabilities. 'jena' Starts a SPARQL JENA service. A SPARQL 1.1 compliant service that is less scalable and less performant, but allows reasoning (RDFS or OWL) to be enabled. 'elasticSearch' Starts an Elasticsearch service. A text search engine that can be used to power a search bar or similar textual search API. The name argument can be used to distinguish between different endpoints over the same dataset that are used for different tasks.","title":"Optional"},{"location":"triplydb-js/dataset/#examples_1","text":"The following snippet starts two SPARQL endpoints over a specific dataset. One endpoint will be used in the acceptance environment while the other endpoint will be used in the production system. const account = await triply.getAccount() const dataset = await account.getDataset('my-dataset') const acceptance = await dataset.addService('acceptance') const production = await dataset.addService('production', { type: 'elasticsearch', }) const reasoning = await dataset.addService('reasoning', { type: 'jena', config: { reasoner: 'OWL' }, })","title":"Examples"},{"location":"triplydb-js/dataset/#see-also","text":"See class Service for an overview of the methods that can be used with service objects.","title":"See also"},{"location":"triplydb-js/dataset/#datasetclearresourcetype-string","text":"Removes one or more resource types from the current dataset.","title":"Dataset.clear(...resourceType: string)"},{"location":"triplydb-js/dataset/#arguments_1","text":"The resources are specified by the rest parameter resourceType , which supports the following values : 'assets' Removes all assets in the dataset. 'graphs' Removes all graphs in the dataset. 'services' Removes all services in the dataset.","title":"Arguments"},{"location":"triplydb-js/dataset/#examples_2","text":"The following example code removes all graphs and services for a specific dataset: const account = await triply.getAccount() const dataset = await account.getDataset('my-dataset') await dataset.clear('graphs', 'services')","title":"Examples"},{"location":"triplydb-js/dataset/#datasetcopyaccount-string-dataset-string","text":"Creates a copy of the current dataset. The owner (user or organization) of the copy is specified with parameter account . The name of the copy is specified with parameter dataset . This operation does not overwrite existing datasets: if the copied-to dataset already exists, a new dataset with suffix -1 will be created.","title":"Dataset.copy(account: string, dataset: string)"},{"location":"triplydb-js/dataset/#examples_3","text":"const account = await triply.getAccount() const dataset = await account.getDataset('my-dataset') console.log(await dataset.copy('account name', 'copy dataset name'))","title":"Examples"},{"location":"triplydb-js/dataset/#datasetdelete","text":"Deletes the dataset. 
This includes deleting the dataset metadata, all of its graphs, all of its services, and all of its assets.","title":"Dataset.delete()"},{"location":"triplydb-js/dataset/#examples_4","text":"The following snippet deletes a specific dataset that is part of the account associated with the current API token: const account = await triply.getAccount() const dataset = await account.getDataset('my-dataset') await dataset.delete()","title":"Examples"},{"location":"triplydb-js/dataset/#see-also_1","text":"Sometimes it is more useful to only delete the graphs that belong to a dataset, but leave the dataset metadata, services, and assets in place. The following methods can be used for this purpose: Dataset.deleteGraph(graphName: string) Dataset.removeAllGraphs()","title":"See also"},{"location":"triplydb-js/dataset/#datasetdeletegraphname-string","text":"Deletes the graph with the given name from this dataset. Graph names are IRIs.","title":"Dataset.deleteGraph(name: string)"},{"location":"triplydb-js/dataset/#examples_5","text":"The following snippet deletes a specific graph from a specified dataset: const account = await triply.getAccount() const dataset = await account.getDataset('my-dataset') await dataset.deleteGraph('https://example.org/some-graph')","title":"Examples"},{"location":"triplydb-js/dataset/#datasetdescribeiri-stringnamednode","text":"Each dataset is a collection of triples that describe objects in linked data. Each object is defined with an IRI, an identifier for that object. An object often has incoming and outgoing connections. The Dataset.describe() call can retrieve the incoming and outgoing triples per object. The function returns for a given iri a list of quads where the iri is either in the subject or the object position.","title":"Dataset.describe(iri: string|NamedNode)"},{"location":"triplydb-js/dataset/#examples_6","text":"The following snippet returns all triples that have https://example.org/id/some-instance in the subject or the object position: const user = await triply.getUser() const dataset = await account.getDataset('my-dataset') console.log(await dataset.describe('https://example.org/id/some-instance'))","title":"Examples"},{"location":"triplydb-js/dataset/#datasetgetassetname-string-version-number","text":"Returns the asset with the given name for this dataset. Optionally allows the version number ( version ) of the asset to be specified. If the version number is absent, the latest version of the assert with the given name is returned.","title":"Dataset.getAsset(name: string, version?: number)"},{"location":"triplydb-js/dataset/#examples_7","text":"The following snippet returns the original version of an image of a dog from the animals dataset: const user = await triply.getUser() const dataset = user.getDataset('my-dataset') const asset = await dataset.getAsset('file.png', 1)","title":"Examples"},{"location":"triplydb-js/dataset/#datasetgetassets","text":"Returns an async iterator over the assets that belong to this dataset. Assets are binary files that are stored together with data graphs. 
Common examples include documents, images and videos.","title":"Dataset.getAssets()"},{"location":"triplydb-js/dataset/#examples_8","text":"The following snippet prints the assets for a specific dataset: const user = await triply.getUser() const dataset = user.getDataset('my-dataset') for await (const asset of dataset.getAssets()) { console.log(asset) } The following snippet prints the list of assets for a specific dataset: const account = await triply.getAccount() const dataset = await account.getDataset('my-dataset') console.log(await dataset.getAssets().toArray())","title":"Examples"},{"location":"triplydb-js/dataset/#datasetgetgraphname-string","text":"Each dataset with data consists out of one or more named graphs. All graphs together are thus the collection of triples of the dataset. Often the graph is used to denote a part of the dataset. For example the data model of the dataset or the metadata of the dataset. Instead of searching over the complete dataset where you want to scope it to a certain graph you can use the getGraph() function to specify the graph. Dataset.getGraph(name: string) returns the graph with the given name that belongs to this dataset. The name is the string representation of the graph IRI. The Dataset.getGraph returns a graph object.","title":"Dataset.getGraph(name: string)"},{"location":"triplydb-js/dataset/#examples_9","text":"The following snippet returns the graph about cats from the dataset about animals: const user = await triply.getUser() const dataset = await user.getDataset('animals') const graph = dataset.getGraph('https://example.com/cats')","title":"Examples"},{"location":"triplydb-js/dataset/#datasetgetgraphs","text":"Returns an async iterator over graphs that belong to this dataset.","title":"Dataset.getGraphs()"},{"location":"triplydb-js/dataset/#examples_10","text":"The following snippet retrieves the graphs for a specific dataset: const account = await triply.getAccount() const dataset = await account.getDataset('my-dataset') console.log(await dataset.getGraphs().toArray())","title":"Examples"},{"location":"triplydb-js/dataset/#datasetgetinfo","text":"Returns information about this dataset. Information is returned in a dictionary object. Individual keys can be accessed for specific information values.","title":"Dataset.getInfo()"},{"location":"triplydb-js/dataset/#examples_11","text":"The following snippet prints the information from the specified dataset of the current user: const user = await triply.getUser() const dataset = await user.getDataset('my-dataset') console.log(await dataset.getInfo())","title":"Examples"},{"location":"triplydb-js/dataset/#datasetgetprefixes","text":"Returns the prefixes that are defined for this dataset. 
This contains prefix declarations that are generic and configured for this TriplyDB server, and prefix declarations that are defined for this specific dataset.","title":"Dataset.getPrefixes()"},{"location":"triplydb-js/dataset/#examples_12","text":"The following snippet prints the prefix declarations that hold for my-dataset : const user = await triply.getUser() const dataset = user.getDataset('my-dataset') for await (const prefix of dataset.getPrefixes()) { console.log(prefix) }","title":"Examples"},{"location":"triplydb-js/dataset/#datasetgetservicename-string","text":"Returns the service with the given name for this dataset.","title":"Dataset.getService(name: string)"},{"location":"triplydb-js/dataset/#examples_13","text":"The following snippet retrieves the acceptance service for the product catalog of an imaginary company: const organization = triply.getOrganization('some-company') const dataset = organization.getDataset('product-catalog') const service = dataset.getService('acceptance')","title":"Examples"},{"location":"triplydb-js/dataset/#datasetgetservices","text":"Returns an async iterator over TriplyDB services under a dataset. See class Service for an overview of the methods for service objects.","title":"Dataset.getServices()"},{"location":"triplydb-js/dataset/#examples_14","text":"The following snippet emits the services that are enabled for a specific dataset: const account = await triply.getAccount() const dataset = await account.getDataset('my-dataset') for await (const service of dataset.getServices()) { console.log(service) } If you do not want to iterate over the services with an async iterator, but instead want to get an array of services use the .toArray() call instead: const account = await triply.getAccount() const dataset = await account.getDataset('my-dataset') console.log(await dataset.getServices().toArray())","title":"Examples"},{"location":"triplydb-js/dataset/#datasetgetstatementssubject-string-predicate-string-object-string-graph-string","text":"Returns an async iterator with statements (quadruples) that fit the specified pattern.","title":"Dataset.getStatements({subject?: string, predicate?: string, object?: string, graph?: string})"},{"location":"triplydb-js/dataset/#arguments_2","text":"subject , if specified, is the subject term that should be matched. predicate , if specified, is the predicate term that should be matched. object , if specified, is the object term that should be matched. graph , if specified, is the graph name that should be matched.","title":"Arguments"},{"location":"triplydb-js/dataset/#example","text":"The following prints all statements in the dataset: const user = triply.getUser() const dataset = await user.getDataset('my-dataset') for await (const statement of dataset.getStatements()) { console.log(statement) } The following prints the description of the Amsterdam resource in the DBpedia dataset: const association = triply.getOrganization('DBpedia-association') const dbpedia = association.getDataset('dbpedia') for await (const statement of dbpedia.getStatements({subject: 'http://dbpedia.org/resource/Amsterdam'})) { console.log(statement) }","title":"Example"},{"location":"triplydb-js/dataset/#get-the-data-locally","text":"Most of the time you do not need to download the entire dataset locally as TriplyDB supports a variety of methods to use linked data directly. But if you want to use the entire graph locally that is possible with TriplyDB.js . There are three methods to retrieve linked data from TriplyDB. 
graphsToFile() , graphsToStore() and graphsToStream() .","title":"Get the data locally"},{"location":"triplydb-js/dataset/#datasetgraphstofiledestinationpath-string-arguments-object","text":"The first method downloads the linked data graphs directly and writes the data to the location of the destinationPath . The extension on the destinationPath defines the linked data type that is downloaded. The extensions that are supported are: nt , nq , trig , ttl , jsonld , json . If no extension is set or the extension is not recognized, the function will throw an error.","title":"Dataset.graphsToFile(destinationPath: string, arguments?: object)"},{"location":"triplydb-js/dataset/#optional_1","text":"The optional properties accepted as arguments for graphsToFile : Compressed Argument compressed optionally is a boolean defining whether the graph is compressed with the GNU zip (gzip) compression algorithm, in which case the file will end with a `.gz` extension. Graph Argument Graph optionally is a specific graph that you want to write to file. This graph is an instance of the 'Graph' class.","title":"Optional"},{"location":"triplydb-js/dataset/#examples_15","text":"The following example downloads the dataset to file: const triply = App.get({ token: process.env.TOKEN }) const user = await triply.getAccount() const dataset = await user.getDataset('pokemon') await dataset.graphsToFile('my-filename.ttl', {compressed: true})","title":"Examples"},{"location":"triplydb-js/dataset/#datasetgraphstostoregraph-graph","text":"The second method is to download the data into an N3.store . The n3 library is one of the most complete libraries for handling linked data in memory. The N3.js library is an implementation of the RDF.js low-level specification that lets you handle RDF in JavaScript easily, with an asynchronous, streaming approach. To reduce the overhead of downloading your data to a file and then inserting it into the N3 store, TriplyDB.js has a graphsToStore() function that returns an N3 store directly.","title":"Dataset.graphsToStore(graph?: Graph)"},{"location":"triplydb-js/dataset/#optional_2","text":"The optional argument for graphsToStore is Graph . With Graph you can optionally define a specific graph that you want to retrieve. This graph is an instance of the 'Graph' class.","title":"Optional"},{"location":"triplydb-js/dataset/#examples_16","text":"The following example downloads the dataset as an N3.store : const triply = App.get({ token: process.env.TOKEN }) const user = await triply.getAccount() const dataset = await user.getDataset('pokemon') const store = await dataset.graphsToStore()","title":"Examples"},{"location":"triplydb-js/dataset/#datasetgraphstostreamtype-compressed-rdf-js-arguments-object","text":"The final method to download linked data to a local source is graphsToStream . This function returns a stream of quads that can directly be iterated over. The stream is either of type compressed , which returns a gzipped stream of linked data, or of type rdf-js , which returns a stream of quads parsed according to the rdf-js standard .","title":"Dataset.graphsToStream(type: 'compressed' | 'rdf-js', arguments?: object)"},{"location":"triplydb-js/dataset/#optional_3","text":"The following arguments can be defined in the optional arguments object. Extension Argument Extension optionally defines the linked data type that is streamed. The extensions that are supported are: `nt`, `nq`, `trig`, `ttl`, `jsonld`, `json`. Graph Argument Graph optionally is a specific graph that you want to stream. 
This graph is an instance of a 'Graph' class","title":"Optional"},{"location":"triplydb-js/dataset/#examples_17","text":"The following example streams through the dataset as rdf-js quad objects and prints the quad to the screen. Notice that the stream is an async iterator. Example 1 const triply = App.get({ token: process.env.TOKEN }) const user = await triply.getAccount() const dataset = await user.getDataset('pokemon') const stream = await dataset.graphsToStream('rdf-js', {extension: '.nq'}) for await(const quad of stream){ console.log(quad) } The following example streams through the dataset as chunks of ttl. and prints the buffer to the screen. Example 2 const triply = App.get({ token: process.env.TOKEN }) const user = await triply.getAccount() const dataset = await user.getDataset('pokemon') const stream = await dataset.graphsToStream('compressed', {extension: '.ttl'}) for await(const quad of stream.pipe(zlib.createGunzip())){ console.log((quad as Buffer).toString()) }","title":"Examples"},{"location":"triplydb-js/dataset/#datasetimportfromdatasetfromdataset-dataset-arguments-object","text":"Imports one or more named graphs from a different dataset into this dataset. Data reuse is an important principle in linked data. This functionality makes it very easy to pull in vocabularies and datasets from other places. Changes in the fromDataset dataset are not automatically reflected in this dataset. If you want to synchronize with changes made in the imported-from dataset, the graphs must be explicitly imported. This protects this dataset against unanticipated changes in the imported-from dataset, while still being able to stay in sync with the imported-from dataset if this is explicitly requested.","title":"Dataset.importFromDataset(fromDataset: Dataset, arguments?: object)"},{"location":"triplydb-js/dataset/#required_2","text":"Argument fromDataset is the dataset object from which one or more graphs are imported over to this dataset.","title":"Required"},{"location":"triplydb-js/dataset/#optional_4","text":"The optional properties accepted as arguments for importFromDataset graphMap Argument ` graphMap ` optionally is an object with keys and values that implements a mapping from existing graph names (keys) to newly created graph names (values). Each key must be an existing graph name in the `from` dataset. Each value must be the corresponding graph name in this dataset. If this argument is not specified, then graph names in the `from` dataset are identical to graph names in this dataset. Note that either graphNames or graphMap can be given as optional argument and not both. graphNames Argument ` graphNames ` optionally is an array of graph names. These names can be one of three types: 'string', instances of a 'Graph' class, or instances of 'NamedNodes'. Note that either graphNames or graphMap can be given as optional argument and not both. overwrite Accepts a Boolean value. An optional property that determines whether existing graph names in this dataset are allowed to be silently overwritten. If this argument is not specified, then `false` is used as the default value.","title":"Optional"},{"location":"triplydb-js/dataset/#examples_18","text":"The following snippet creates a new dataset ( newDataset ) and imports one graph from an existing dataset ( existingDataset ). Notice that the graph can be renamed as part of the import. Example 1 Imports the complete 'existingDataset' dataset to the 'newDataset' . 
const account = await triply.getAccount() const existingDataset = await account.getDataset('existingDataset') const newDataset = await account.addDataset('newDataset') await newDataset.importFromDataset(existingDataset) Example 2 Imports the 'anotherDataset' dataset into 'newDataset' , where a graph from the existing dataset is renamed to a new graph name in the new dataset. Only the graphs from the graphMap are imported. const account = await triply.getAccount() const anotherDataset = await account.getDataset('anotherDataset') const newDataset = await account.addDataset('newDataset') await newDataset.importFromDataset(anotherDataset, { graphMap: { 'https://example.org/existingDataset/graph': 'https://example.org/newDataset/graph'} }) Example 3 Imports the 'oneMoreDataset' dataset into 'newDataset' , where a specific graph from the existing dataset is added to the new dataset. If the graph name already occurs in 'newDataset' , it will get overwritten. const account = await triply.getAccount() const oneMoreDataset = await account.getDataset('oneMoreDataset') const newDataset = await account.addDataset('newDataset') await newDataset.importFromDataset(oneMoreDataset, { graphNames: ['https://example.org/existingDataset/graph'], overwrite: true, })","title":"Examples"},{"location":"triplydb-js/dataset/#datasetimportfromfilesfiles-liststring-file-defaultsconfig-object","text":"","title":"Dataset.importFromFiles(files: list(string || File), defaultsConfig?: object)"},{"location":"triplydb-js/dataset/#required_3","text":"Imports one or more files into this dataset. The files must contain RDF data. Optional: defaultsConfig: object defaultGraphName Accepts a string value that is set as the default graph name for each imported file baseIRI Accepts a string value that is set as the default baseIRI for each imported file overwriteAll Accepts a boolean value that overwrites previously added graph names or baseIRIs (regardless of whether they came from a URL or a file) mergeGraphs Accepts a Boolean value. An optional property that determines whether existing graphs in this dataset are merged with the imported graphs. If this argument is not specified, then `false` is used as the default value.","title":"Required"},{"location":"triplydb-js/dataset/#supported-files","text":"The files must contain RDF data and must be encoded in one of the following standardized RDF serialization formats: N-Quads, N-Triples, TriG, Turtle.","title":"Supported files"},{"location":"triplydb-js/dataset/#examples_19","text":"Example 1 const account = await triply.getAccount('Triply') const dataset = await account.getDataset(iris) await dataset.importFromFiles('test.nt') await dataset.importFromFiles(['file.nq', 'file.tar.gz']) Example 2 const account = await triply.getAccount('Triply') const dataset = await account.getDataset(iris) await dataset.importFromFiles('test.nt') await dataset.importFromFiles(['file.nq', 'file.tar.gz'], { defaultGraphName: 'https://triplydb.com/Triply/example/graph/default', overwriteAll: true, })","title":"Examples"},{"location":"triplydb-js/dataset/#datasetimportfromstorestore-n3store-defaultsconfig-object","text":"One of the most complete libraries for handling linked data in memory is the n3 library . The N3.js library is an implementation of the RDF.js low-level specification that lets you handle RDF in JavaScript easily, with an asynchronous, streaming approach. To reduce the overhead of converting your data from the N3 store to a file and then uploading it to TriplyDB, 
TriplyDB.js has an importFromStore() function that takes an N3 store as its first argument and uploads its contents directly to TriplyDB.","title":"Dataset.importFromStore(store: n3.Store, defaultsConfig?: object)"},{"location":"triplydb-js/dataset/#examples_20","text":"const store = new Store() store.addQuad(DataFactory.namedNode('https://triplydb.com/id/me'),DataFactory.namedNode('http://www.w3.org/2000/01/rdf-schema#label'),DataFactory.literal('me'),DataFactory.namedNode('https://triplydb.com/Triply/example/graph/default')) const triply = App.get({ token: process.env.TOKEN }) const user = await triply.getUser() const dataset = (await user.getDatasets().toArray())[0] await dataset.importFromStore(store)","title":"Examples"},{"location":"triplydb-js/dataset/#datasetimportfromurlsurls-liststring-defaultsconfig-object","text":"","title":"Dataset.importFromUrls(urls: list(string), defaultsConfig?: object)"},{"location":"triplydb-js/dataset/#required_4","text":"Imports one or more URLs into this dataset. The URLs must provide access to RDF data. Optional: defaultsConfig: object defaultGraphName Accepts a string value that is set as the default graph name for each imported URL baseIRI Accepts a string value that is set as the default baseIRI for each imported URL overwriteAll Accepts a boolean value that overwrites previously added graph names or baseIRIs (regardless of whether they came from a URL or a file)","title":"Required"},{"location":"triplydb-js/dataset/#examples_21","text":"await dataset1.importFromUrls(['url1', 'url2', 'url3'])","title":"Examples"},{"location":"triplydb-js/dataset/#datasetremoveallgraphs","text":"Removes all graphs from this dataset.","title":"Dataset.removeAllGraphs()"},{"location":"triplydb-js/dataset/#examples_22","text":"The following snippet removes all graphs from a specific dataset: const user = await triply.getUser() const dataset = await user.getDataset('my-dataset') await dataset.removeAllGraphs()","title":"Examples"},{"location":"triplydb-js/dataset/#datasetremoveprefixesprefixes-string","text":"Removes IRI prefixes from this dataset. The prefixes argument is a string array, containing the prefix labels to be removed.","title":"Dataset.removePrefixes(prefixes: string[])"},{"location":"triplydb-js/dataset/#examples_23","text":"The following snippet removes the def and id prefixes from the specified dataset: const account = await triply.getAccount() const dataset = await account.getDataset('my-dataset') await dataset.removePrefixes(['def', 'id'])","title":"Examples"},{"location":"triplydb-js/dataset/#datasetrenamegraphfrom-string-to-string","text":"Renames a graph of this dataset, where from is the current graph name and to is the new graph name. The string arguments for from and to must be valid IRIs.","title":"Dataset.renameGraph(from: string, to: string)"},{"location":"triplydb-js/dataset/#examples_24","text":"The following snippet renames a specific graph of a specific dataset: const account = await triply.getAccount() const dataset = await account.getDataset('my-dataset') await dataset.renameGraph( 'https://example.org/old-graph', 'https://example.org/new-graph' )","title":"Examples"},{"location":"triplydb-js/dataset/#datasetupdatemetadata-object","text":"Updates the metadata for this dataset.","title":"Dataset.update(metadata: object)"},{"location":"triplydb-js/dataset/#arguments_3","text":"The metadata argument takes a dictionary object with the following keys: Required: accessLevel The access level of the dataset. 
The following values are supported: 'private' The dataset can only be accessed by the Account object for which it is created. 'internal' The dataset can only be accessed by people who are logged into the TriplyDB server. 'public' The dataset can be accessed by everybody. Optional: description The description of the dataset. This description can make use of Markdown. displayName The human-readable name of the dataset. This name may contain spaces and other characters that are not allowed in the URL-friendly name. license The license of the dataset. The following license strings are currently supported: 'CC-BY-SA' 'CC0 1.0' 'GFDL' 'ODC-By' 'ODC-ODbL' 'PDDL'","title":"Arguments"},{"location":"triplydb-js/dataset/#example_1","text":"The following snippet updates the dataset's access level, description, display name and license: const account = await triply.getAccount() const dataset = await account.getDataset('my-dataset') await dataset.update({ accessLevel: 'private', description: 'desc', displayName: 'disp', license: 'PDDL', })","title":"Example"},{"location":"triplydb-js/dataset/#datasetuploadasset-filepath-string-opts-mode-throw-if-exists-replace-if-exists-append-version-name-string","text":"Uploads a file that does not contain RDF data as an asset. You can specify the name of the asset and what to do if the asset already exists (throws an error by default).","title":"Dataset.uploadAsset( filePath: string, opts?: {mode?: 'throw-if-exists'| 'replace-if-exists'| 'append-version', name?: string})"},{"location":"triplydb-js/dataset/#user-cases","text":"There are several use cases for assets: Source data that will be used as input files to an ETL process. Documentation files that describe the dataset. Media files (audio/image/video) that are described in the RDF graph.","title":"Use cases"},{"location":"triplydb-js/dataset/#examples_25","text":"The following snippet uploads a source CSV data file and a PDF documentation file: const account = await triply.getAccount() const dataset = await account.getDataset('my-dataset') await dataset.uploadAsset('my-source-data', {name: 'source.csv.gz'}) await dataset.uploadAsset('my-documentation', {name: 'documentation.pdf'}) await dataset.uploadAsset('my-documentation', {mode:'append-version', name: 'documentation.pdf'})","title":"Examples"},{"location":"triplydb-js/faq/","text":"On this page: FAQ How to perform a SPARQL query? What is the latest version of TriplyDB.js? What to do when the \u201cError: Unauthorized\u201d appears? How do I get the results of a saved query using TriplyDB.js? What is an async iterator? FAQ \u00b6 This section includes answers to frequently asked questions. Please contact info@triply.cc if you have a question that does not appear in this list. How to perform a SPARQL query? \u00b6 The SPARQL 1.1 Protocol standard specifies a native HTTP API for performing SPARQL requests. Such requests can be performed with regular HTTP libraries. Here we give an example indicating how such an HTTP library can be used: import SuperAgent from 'superagent'; const reply = await SuperAgent.post('SPARQL_ENDPOINT') .set('Accept', 'application/sparql-results+json') .set('Authorization', 'Bearer ' + process.env.TOKEN) .buffer(true) .send({ query: 'select * { WHERE_CLAUSE } offset 0 limit 10000' }) // break condition when the result set is empty. // downsides: caching, string manipulation What is the latest version of TriplyDB.js? \u00b6 The latest version of TriplyDB.js can be found in the NPM repository . 
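For a quick check from the command line (a minimal sketch, assuming your project uses npm), you can compare the latest published version with the version that is installed in your project:

# Latest version published on NPM.
npm view @triply/triplydb version
# Version currently installed in your project.
npm ls @triply/triplydb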
What to do when the \u201cError: Unauthorized\u201d appears? \u00b6 This error appears whenever an operation is performed for which the user denoted by the current API token is not authorized. One common appearance of this error is when the environment variable TOKEN is not set to an API token. The current value of the environment variable can be tested by running the following command in the terminal: echo $TOKEN How do I get the results of a saved query using TriplyDB.js? \u00b6 To reliably retrieve a large number of results as the output of a construct or select query, follow these steps: 1. Import the triplydb library. import App from '@triply/triplydb'; 2. Set your parameters, regarding the TriplyDB server and the account in which you have saved the query as well as the name of the query. const triply = App.get({ url: 'https://api.triplydb.com' }) const account = await triply.getAccount('account-name') const query = await account.getQuery('name-of-some-query') If the query is not public, you should set your API token rather than the URL. const triply = App.get({ token: process.env.TOKEN }) 3. Do not forget that we perform TriplyDB.js requests within an async context. That is: async function run() { // Your code goes here. } run() 4. Get the results of a query by setting a results variable. More specifically, for construct queries: const results = query.results().statements() For select queries: const results = query.results().bindings() Note that for SPARQL construct queries, we use method .statements() , while for SPARQL select queries, we use method .bindings() . Additionally, saved queries can have API variables that allow you to specify variables that are used in the query. Thus, if you have query parameters, pass their values as the first argument to results as follows: // For SPARQL construct queries. const results = query .results({ someVariable: 'value of someVariable', anotherVariable: 'value of anotherVariable', }) .statements() // For SPARQL select queries. const results = query .results({ someVariable: 'value of someVariable', anotherVariable: 'value of anotherVariable', }) .bindings() 5. To read the results you have three options: 5a. Iterate through the results per row in a for -loop: // Iterating over the results per row for await (const row of results) { // execute something } 5b. Save the results to a file. For saving SPARQL construct queries: // Saving the results of a SPARQL construct query to a file. await results.toFile('my-file.nt') For saving SPARQL select queries. Currently we only support saving the file to a .tsv format: // Saving the results of a SPARQL select query to a file. await results.toFile('my-file.tsv') 5c. Load all results into memory. Note that this is almost never used. If you want to process results, then option 5a is better; if you want to persist results, then option 5b is better. // Loading results for a SPARQL construct or SPARQL select query into memory. const array = await results.toArray() What is an async iterator? \u00b6 TriplyDB.js makes use of async iterators for retrieving lists of objects. Async iterators are a method of fetching and iterating through large lists, without having to first fetch the whole set. An example of an async iterator in TriplyDB.js is App.getAccounts() . The following code illustrates how it can be used. for await (const account of triply.getAccounts()) { console.log(account) } For cases where you want the complete list, you can use the toArray function of the iterator. 
const accounts = await triply.getAccounts().toArray() TriplyDB.js returns async iterators from the following methods: App.getAccounts() Account.getDatasets() Account.getQueries() Account.getStories() Dataset.getServices() Dataset.getAssets() Dataset.getGraphs() Dataset.getStatements() Query.results().statements() for SPARQL construct and describe queries Query.results().bindings() for SPARQL select queries","title":"FAQ"},{"location":"triplydb-js/faq/#faq","text":"This section includes answers to frequently asked questions. Please contact info@triply.cc if you have a question that does not appear in this list.","title":"FAQ"},{"location":"triplydb-js/faq/#how-to-perform-a-sparql-query","text":"The SPARQL 1.1 Protocol standard specifies a native HTTP API for performing SPARQL requests. Such requests can be performed with regular HTTP libraries. Here we give an example indicating how such an HTTP library can be used: import SuperAgent from 'superagent'; const reply = await SuperAgent.post('SPARQL_ENDPOINT') .set('Accept', 'application/sparql-results+json') .set('Authorization', 'Bearer ' + process.env.TOKEN) .buffer(true) .send({ query: 'select * { WHERE_CLAUSE } offset 0 limit 10000' }) // break condition when the result set is empty. // downsides: caching, string manipulation","title":"How to perform a SPARQL query?"},{"location":"triplydb-js/faq/#what-is-the-latest-version-of-triplydbjs","text":"The latest version of TriplyDB.js can be found in the NPM repository .","title":"What is the latest version of TriplyDB.js?"},{"location":"triplydb-js/faq/#what-to-do-when-the-error-unauthorized-appears","text":"This error appears whenever an operation is performed for which the user denoted by the current API token is not authorized. One common appearance of this error is when the environment variable TOKEN is not set to an API token. The current value of the environment variable can be tested by running the following command in the terminal: echo $TOKEN","title":"What to do when the \u201cError: Unauthorized\u201d appears?"},{"location":"triplydb-js/faq/#how-do-i-get-the-results-of-a-saved-query-using-triplydbjs","text":"To reliably retrieve a large number of results as the output of a construct or select query, follow these steps: 1. Import the triplydb library. import App from '@triply/triplydb'; 2. Set your parameters, regarding the TriplyDB server and the account in which you have saved the query as well as the name of the query. const triply = App.get({ url: 'https://api.triplydb.com' }) const account = await triply.getAccount('account-name') const query = await account.getQuery('name-of-some-query') If the query is not public, you should set your API token rather than the URL. const triply = App.get({ token: process.env.TOKEN }) 3. Do not forget that we perform TriplyDB.js requests within an async context. That is: async function run() { // Your code goes here. } run() 4. Get the results of a query by setting a results variable. More specifically, for construct queries: const results = query.results().statements() For select queries: const results = query.results().bindings() Note that for SPARQL construct queries, we use method .statements() , while for SPARQL select queries, we use method .bindings() . Additionally, saved queries can have API variables that allow you to specify variables that are used in the query. Thus, if you have query parameters, pass their values as the first argument to results as follows: // For SPARQL construct queries. 
const results = query .results({ someVariable: 'value of someVariable', anotherVariable: 'value of anotherVariable', }) .statements() // For SPARQL select queries. const results = query .results({ someVariable: 'value of someVariable', anotherVariable: 'value of anotherVariable', }) .bindings() 5. To read the results you have three options: 5a. Iterate through the results per row in a for -loop: // Iterating over the results per row for await (const row of results) { // execute something } 5b. Save the results to a file. For saving SPARQL construct queries: // Saving the results of a SPARQL construct query to a file. await results.toFile('my-file.nt') For saving SPARQL select queries. Currently we only support saving the file to a .tsv format: // Saving the results of a SPARQL select query to a file. await results.toFile('my-file.tsv') 5c. Load all results into memory. Note that this is almost never used. If you want to process results, then option 5a is better; if you want to persist results, then option 5b is better. // Loading results for a SPARQL construct or SPARQL select query into memory. const array = await results.toArray()","title":"How do I get the results of a saved query using TriplyDB.js?"},{"location":"triplydb-js/faq/#what-is-an-async-iterator","text":"TriplyDB.js makes use of async iterators for retrieving lists of objects. Async iterators are a method of fetching and iterating through large lists, without having to first fetch the whole set. An example of an async iterator in TriplyDB.js is App.getAccounts() . The following code illustrates how it can be used. for await (const account of triply.getAccounts()) { console.log(account) } For cases where you want the complete list, you can use the toArray function of the iterator. const accounts = await triply.getAccounts().toArray() TriplyDB.js returns async iterators from the following methods: App.getAccounts() Account.getDatasets() Account.getQueries() Account.getStories() Dataset.getServices() Dataset.getAssets() Dataset.getGraphs() Dataset.getStatements() Query.results().statements() for SPARQL construct and describe queries Query.results().bindings() for SPARQL select queries","title":"What is an async iterator?"},{"location":"triplydb-js/graph/","text":"On this page: Graph Examples Graph.delete() Examples Graph.getInfo() Examples Graph.rename(name: string) Examples Get the data locally Graph.toFile(destinationPath: string, arguments?: object) Optional Examples Graph.toStore(graph?: Graph) Examples Graph.toStream(type: 'compressed' | 'rdf-js', arguments?: object) Optional Examples Graph \u00b6 Each dataset with data consists out of one or more named graphs. All graphs together is thus the collection of triples of the dataset. Often the graph is used to denote a part of the dataset. For example the data model of the dataset or the metadata of the dataset. A graph has as advantage that is can partition data while at the same time keep the data in the same dataset. Reducing the overhead of having to move between datasets to traverse a graph. You can retrieve either retrieve all graphs from a dataset in the form of an async iterator. Or retrieve a specific graph from a dataset. 
Examples \u00b6 The following snippet retrieves the graph 'https://example.com/my-graph' for a specific dataset: const user = await triply.getUser() const dataset = await user.getDataset('my-dataset') const graph = await dataset.getGraph('https://example.com/my-graph') The following snippet retrieves all the graphs for a specific dataset: const account = await triply.getAccount() const dataset = await account.getDataset('my-dataset') const graphs = dataset.getGraphs() The Graph is the smallest object that can be individually deleted or modified. Graph.delete() \u00b6 Deletes the graph of this dataset. Any copies of the graph will not be deleted. All services containing this graph will still contain the graph until the service is synced again. Examples \u00b6 The following snippet deletes a specific graph that is part of the account associated with the current API token: const account = await triply.getAccount() const dataset = await account.getDataset('my-dataset') const graph = await dataset.getGraph('https://example.com/my-graph') await graph.delete() Graph.getInfo() \u00b6 Returns information about this graph. Information is returned in a dictionary object. Individual keys can be accessed for specific information values. The following keys and values are returned for graph.getInfo() : id A hexadecimal hash that identifies the graph internally. graphName The URL-friendly name of the graph that is used as identifier and name. numberOfStatements The number of statements in the graph. uploadedAt (Optional) The date/time at which the graph was uploaded to TriplyDB. importedAt (Optional) The date/time at which the graph was imported from another dataset. importedFrom (Optional) graphName The graph name of the graph from the dataset from which the graph was imported. dataset The dataset from which the graph was imported. Examples \u00b6 The following snippet prints the information from the specified graph of the specified dataset of the current user: const user = await triply.getUser() const dataset = await user.getDataset('my-dataset') const graph = await dataset.getGraph('https://example.com/my-graph') console.log(await graph.getInfo()) Graph.rename(name: string) \u00b6 Renames the graph. The argument name is the new graph name. The string argument for name must be a valid IRI. Examples \u00b6 The following snippet renames a specific graph of a specific dataset: const account = await triply.getAccount() const dataset = await account.getDataset('my-dataset') const graph = await dataset.getGraph('https://example.com/my-graph') await graph.rename('https://example.org/new-graph') Get the data locally \u00b6 Most of the time you do not need to download a graph locally, as TriplyDB supports a variety of methods to use linked data directly. But if you want to use a graph locally, that is possible with TriplyDB.js . There are three methods to retrieve linked data from a graph: toFile() , toStore() and toStream() . Graph.toFile(destinationPath: string, arguments?: object) \u00b6 The first method downloads the linked data graph directly and writes the data to the location of the destinationPath . The extension on the destinationPath defines the linked data type that is downloaded. The extensions that are supported are: nt , nq , trig , ttl , jsonld , json . If no extension is set or the extension is not recognized, the function will throw an error. 
Optional \u00b6 The optional properties accepted as arguments for toFile : Compressed Argument compressed optionally is a boolean defining whether the graph is compressed with the GNU zip (gzip) compression algorithm, in which case the file will end with a `.gz` extension. Examples \u00b6 The following example downloads the graph to file: const triply = App.get({ token: process.env.TOKEN }) const user = await triply.getAccount() const dataset = await user.getDataset('pokemon') const graph = await dataset.getGraph('https://example.com/my-graph') await graph.toFile('my-filename.ttl', {compressed: true}) Graph.toStore(graph?: Graph) \u00b6 The second method is to download the data into an N3.store . The n3 library is one of the most complete libraries for handling linked data in memory. The N3.js library is an implementation of the RDF.js low-level specification that lets you handle RDF in JavaScript easily, with an asynchronous, streaming approach. To reduce the overhead of downloading your data to a file and then inserting it into the N3 store, TriplyDB.js has a toStore() function that returns an N3 store directly. Examples \u00b6 The following example downloads the graph as an N3.store : const triply = App.get({ token: process.env.TOKEN }) const user = await triply.getAccount() const dataset = await user.getDataset('pokemon') const graph = await dataset.getGraph('https://example.com/my-graph') const store = await graph.toStore() Graph.toStream(type: 'compressed' | 'rdf-js', arguments?: object) \u00b6 The final method to download linked data to a local source is toStream . This function returns a stream of quads that can directly be iterated over. The stream is either of type compressed , which returns a gzipped stream of linked data, or of type rdf-js , which returns a stream of quads parsed according to the rdf-js standard . Optional \u00b6 The following arguments can be defined in the optional arguments object. Extension Argument Extension optionally defines the linked data type that is streamed. The extensions that are supported are: `nt`, `nq`, `trig`, `ttl`, `jsonld`, `json`. Examples \u00b6 The following example streams through the graph as rdf-js quad objects and prints each quad to the screen. Notice that the stream is an async iterator. Example 1 const triply = App.get({ token: process.env.TOKEN }) const user = await triply.getAccount() const dataset = await user.getDataset('pokemon') const graph = await dataset.getGraph('https://example.com/my-graph') const stream = await graph.toStream('rdf-js', {extension: '.nq'}) for await(const quad of stream){ console.log(quad) } The following example streams through the graph as chunks of ttl and prints each buffer to the screen. Example 2 const triply = App.get({ token: process.env.TOKEN }) const user = await triply.getAccount() const dataset = await user.getDataset('pokemon') const graph = await dataset.getGraph('https://example.com/my-graph') const stream = await graph.toStream('compressed', {extension: '.ttl'}) for await(const quad of stream.pipe(zlib.createGunzip())){ console.log((quad as Buffer).toString()) }","title":"Graph"},{"location":"triplydb-js/graph/#graph","text":"Each dataset with data consists of one or more named graphs. All graphs together are thus the collection of triples of the dataset. Often a graph is used to denote a part of the dataset, for example the data model of the dataset or the metadata of the dataset. A graph has the advantage that it can partition data while at the same time keeping the data in the same dataset, 
reducing the overhead of having to move between datasets to traverse a graph. You can either retrieve all graphs from a dataset in the form of an async iterator, or retrieve a specific graph from a dataset.","title":"Graph"},{"location":"triplydb-js/graph/#examples","text":"The following snippet retrieves the graph 'https://example.com/my-graph' for a specific dataset: const user = await triply.getUser() const dataset = await user.getDataset('my-dataset') const graph = await dataset.getGraph('https://example.com/my-graph') The following snippet retrieves all the graphs for a specific dataset: const account = await triply.getAccount() const dataset = await account.getDataset('my-dataset') const graphs = dataset.getGraphs() The Graph is the smallest object that can be individually deleted or modified.","title":"Examples"},{"location":"triplydb-js/graph/#graphdelete","text":"Deletes the graph of this dataset. Any copies of the graph will not be deleted. All services containing this graph will still contain the graph until the service is synced again.","title":"Graph.delete()"},{"location":"triplydb-js/graph/#examples_1","text":"The following snippet deletes a specific graph that is part of the account associated with the current API token: const account = await triply.getAccount() const dataset = await account.getDataset('my-dataset') const graph = await dataset.getGraph('https://example.com/my-graph') await graph.delete()","title":"Examples"},{"location":"triplydb-js/graph/#graphgetinfo","text":"Returns information about this graph. Information is returned in a dictionary object. Individual keys can be accessed for specific information values. The following keys and values are returned for graph.getInfo() : id A hexadecimal hash that identifies the graph internally. graphName The URL-friendly name of the graph that is used as identifier and name. numberOfStatements The number of statements in the graph. uploadedAt (Optional) The date/time at which the graph was uploaded to TriplyDB. importedAt (Optional) The date/time at which the graph was imported from another dataset. importedFrom (Optional) graphName The graph name of the graph from the dataset from which the graph was imported. dataset The dataset from which the graph was imported.","title":"Graph.getInfo()"},{"location":"triplydb-js/graph/#examples_2","text":"The following snippet prints the information from the specified graph of the specified dataset of the current user: const user = await triply.getUser() const dataset = await user.getDataset('my-dataset') const graph = await dataset.getGraph('https://example.com/my-graph') console.log(await graph.getInfo())","title":"Examples"},{"location":"triplydb-js/graph/#graphrenamename-string","text":"Renames the graph. The argument name is the new graph name. The string argument for name must be a valid IRI.","title":"Graph.rename(name: string)"},{"location":"triplydb-js/graph/#examples_3","text":"The following snippet renames a specific graph of a specific dataset: const account = await triply.getAccount() const dataset = await account.getDataset('my-dataset') const graph = await dataset.getGraph('https://example.com/my-graph') await graph.rename('https://example.org/new-graph')","title":"Examples"},{"location":"triplydb-js/graph/#get-the-data-locally","text":"Most of the time you do not need to download a graph locally, as TriplyDB supports a variety of methods to use linked data directly. 
But if you want to use a graph locally that is possible with TriplyDB.js . There are three methods to retrieve linked data from a graph. toFile() , toStore() and toStream() .","title":"Get the data locally"},{"location":"triplydb-js/graph/#graphtofiledestinationpath-string-arguments-object","text":"The first method downloads the linked data graphs directly and writes the data to the location of the destinationPath . The extension on the destinationPath defines the linked data type that is downloaded. The extensions that are supported are: nt , nq , trig , ttl , jsonld , json . If no extension is set or the extension is not recognized the function will throw an error.","title":"Graph.toFile(destinationPath: string, arguments?: object)"},{"location":"triplydb-js/graph/#optional","text":"The optional properties accepted as arguments for toFile Compressed Argument compressed optionally is an boolean defining if a graph is compresssed with GNU zip (gzip) compression algorithm and will end with a `.gz` extension.","title":"Optional"},{"location":"triplydb-js/graph/#examples_4","text":"The following example downloads the graph to file: const triply = App.get({ token: process.env.TOKEN }) const user = await triply.getAccount() const dataset = await user.getDataset('pokemon') const graph = await dataset.getGraph('https://example.com/my-graph') await graph.toFile('my-filename.ttl', {compressed: true})","title":"Examples"},{"location":"triplydb-js/graph/#graphtostoregraph-graph","text":"The second method is to download the file into a N3.store . The n3 library is one of the most complete libraries for handling linked data in memory. The N3.js library is an implementation of the RDF.js low-level specification that lets you handle RDF in JavaScript easily, with an asynchronous, streaming approach. To reduce the overhead of downloading your data to file and then insert it in the N3 Store. TriplyDB.js has a toStore() where a N3 store is returned as a result of the the toStore() function.","title":"Graph.toStore(graph?: Graph)"},{"location":"triplydb-js/graph/#examples_5","text":"The following example downloads the graph as N3.store : const triply = App.get({ token: process.env.TOKEN }) const user = await triply.getAccount() const dataset = await user.getDataset('pokemon') const graph = await dataset.getGraph('https://example.com/my-graph') const store = await graph.toStore()","title":"Examples"},{"location":"triplydb-js/graph/#graphtostreamtype-compressed-rdf-js-arguments-object","text":"The final method to download linked data to a local source is the toStream this function returns a stream of quads that can directly be iterated over. The Stream is either of the type compressed which returns a gzipped stream of linked data, or type rdf-js which returns a stream of quads parsed according to the rdf-js standard .","title":"Graph.toStream(type: 'compressed' | 'rdf-js', arguments?: object)"},{"location":"triplydb-js/graph/#optional_1","text":"The following arguments can be defined in the optional arguments object. Extension Argument Extension optionally defines the linked data type that is streamed. The extensions that are supported are: `nt`, `nq`, `trig`, `ttl`, `jsonld`, `json`.","title":"Optional"},{"location":"triplydb-js/graph/#examples_6","text":"The following example streams through the graph as rdf-js quad objects. and prints the quad to the screen. notice that the stream is an async iterator. 
Example 1 const triply = App.get({ token: process.env.TOKEN }) const user = await triply.getAccount() const dataset = await user.getDataset('pokemon') const graph = await dataset.getGraph('https://example.com/my-graph') const stream = await graph.toStream('rdf-js', {extension: '.nq'}) for await(const quad of stream){ console.log(quad) } The following example streams through the graph as chunks of ttl. and prints the buffer to the screen. Example 2 const triply = App.get({ token: process.env.TOKEN }) const user = await triply.getAccount() const dataset = await user.getDataset('pokemon') const graph = await dataset.getGraph('https://example.com/my-graph') const stream = await graph.toStream('compressed', {extension: '.ttl'}) for await(const quad of stream.pipe(zlib.createGunzip())){ console.log((quad as Buffer).toString()) }","title":"Examples"},{"location":"triplydb-js/organization/","text":"On this page: Organization Obtaining instances Inheritance Organization.addDataset(name: string, metadata?: object) Organization.addMember(user: User, role?: Role) Arguments Examples Organization.removeMember(user: User) Organization.addQuery(name: string, metadata: object) Organization.ensureStory(name: string, metadata: object) Organization.addStory(name: string, metadata?: object) Organization.delete() Examples Organization.ensureDataset(name: string, metadata?: object) Organization.getDataset(name: string) Organization.getDatasets() Organization.getMembers() Return type Examples See also Organization.getPinnedItems() Organization.removeMember(user: User) Arguments Existence considerations Examples Organization.setAvatar(file: string) Organization.update(metadata: object) Organization \u00b6 Instances of class Organization denote organizations in TriplyDB. Obtaining instances \u00b6 Organizations are obtained with method App.getOrganization(name: string) : const organization = await triply.getOrganization('Triply') Alternatively, organizations are obtained by first obtaining an account ( App.getAccount(name?: string) ) and then casting it to an organization ( Account.asOrganization() ): const account = await triply.getAccount('Triply') const organization = account.asOrganization() Inheritance \u00b6 Organization is a subclass of Account , from which it inherits most of its methods. Organization.addDataset(name: string, metadata?: object) \u00b6 Adds a new TriplyDB dataset with the given name to the current organization. Inherited from Account.addDataset(name: string, metadata?: object) . Organization.addMember(user: User, role?: Role) \u00b6 Adds a member to the given Organization , with the given role of either member or owner. Arguments \u00b6 The user argument has to be a user object of the user which should be added to the organization. The role argument can be either 'member' or 'owner' . If this argument is not specified, then 'member' is used as the default. 'member' A regular member that is allowed to read and write the datasets that are published under the organization. 'owner' An owner of the organization. Owners have all the rights of regular users, plus the ability to add/remove users to/from the organization, the ability to change the roles of existing users, and the ability to delete the organization. Examples \u00b6 user The following snippet adds user John Doe to the Triply organization as a regular member. 
const organization = await triply.getOrganization('Triply') const johnDoe = await app.getUser('john-doe') await organization.addMember(johnDoe) Organization.removeMember(user: User) \u00b6 Removes a member from the given Organization . Organization.addQuery(name: string, metadata: object) \u00b6 Adds a new TriplyDB query to the current organization. Inherited from Account.addQuery(name: string, metadata: object) . Organization.ensureStory(name: string, metadata: object) \u00b6 Ensures the existence of a story with the given name and with the specified metadata . Inherited from Account.ensureStory(name: string, metadata: object) . Organization.addStory(name: string, metadata?: object) \u00b6 Adds a new TriplyDB story with the given name to the current organization. Inherited from Account.addStory(name: string, metadata?: object) . Organization.delete() \u00b6 Deletes this account. This also deletes all datasets, stories and queries that belong to this organization. Examples \u00b6 The following code example deletes the specified organization: const organization = await triply.getOrganization('Neo4j') await organization.delete() Organization.ensureDataset(name: string, metadata?: object) \u00b6 Ensures the existence of a dataset with the given name and with the specified metadata . Inherited from Account.ensureDataset(name: string, metadata?: object) . Organization.getDataset(name: string) \u00b6 Returns the dataset with the given name that is published by this organization. Inherited from Account.getDataset(name: string) . Organization.getDatasets() \u00b6 Returns an async iterator over the accessible datasets that belong to this organization. Inherited from Account.getDatasets() . Organization.getMembers() \u00b6 Returns the list of memberships for the given organization. Return type \u00b6 A membership contains the following components: role The role of the membership ( OrgRole ): either 'owner' for owners of the organization, or 'member' for regular members. The difference between owners and regular members is that owners can perform user management for the organization (add/remove/change memberships). user An instance of class User . createdAt A date/time string. updatedAt A date/time string. Examples \u00b6 const org = await triply.getOrganization('acme') for (const membership of await org.getMembers()) { console.log(user) } See also \u00b6 Memberships of organization are TriplyDB users . Organization.getPinnedItems() \u00b6 Returns the list of datasets, stories and queries that are pinned for the current organization. Inherited from Account.getPinnedItems() . Organization.removeMember(user: User) \u00b6 Removes the specified user from this organization. Arguments \u00b6 The user argument has to be a User object of a user. Existence considerations \u00b6 The user must be a current member of the organization for this method to succeed. If the user is not a current member of the organization, an error is thrown. 
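If you do not know in advance whether the user is still a member, one way to guard against this error is to catch it. The following is a minimal sketch (not taken from the official documentation) that uses only the methods documented on this page:

const organization = await triply.getOrganization('Triply')
const johnDoe = await triply.getUser('john-doe')
try {
  // Throws if 'john-doe' is not currently a member of the organization.
  await organization.removeMember(johnDoe)
} catch (error) {
  // The user was not a member, or removal failed for another reason.
  console.error(error)
}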
Examples \u00b6 The following snippet removes John Doe from the Triply organization, using a string argument: const organization = await triply.getOrganization('Triply') const johnDoe = await app.getUser('john-doe') await organization.removeMember(johnDoe) The following snippet removes John Doe from the Triply organization, using a User object: const organization = await triply.getOrganization('Triply') const user = await triply.getUser('john-doe') await organization.removeMember(user) Organization.setAvatar(file: string) \u00b6 Sets a new image that characterized this organization. Inherited from Account.setAvatar(file: string) . Organization.update(metadata: object) \u00b6 Updates the metadata for this account. Inherited from Account.update(metadata: object) .","title":"Organization"},{"location":"triplydb-js/organization/#organization","text":"Instances of class Organization denote organizations in TriplyDB.","title":"Organization"},{"location":"triplydb-js/organization/#obtaining-instances","text":"Organizations are obtained with method App.getOrganization(name: string) : const organization = await triply.getOrganization('Triply') Alternatively, organizations are obtained by first obtaining an account ( App.getAccount(name?: string) ) and then casting it to an organization ( Account.asOrganization() ): const account = await triply.getAccount('Triply') const organization = account.asOrganization()","title":"Obtaining instances"},{"location":"triplydb-js/organization/#inheritance","text":"Organization is a subclass of Account , from which it inherits most of its methods.","title":"Inheritance"},{"location":"triplydb-js/organization/#organizationadddatasetname-string-metadata-object","text":"Adds a new TriplyDB dataset with the given name to the current organization. Inherited from Account.addDataset(name: string, metadata?: object) .","title":"Organization.addDataset(name: string, metadata?: object)"},{"location":"triplydb-js/organization/#organizationaddmemberuser-user-role-role","text":"Adds a member to the given Organization , with the given role of either member or owner.","title":"Organization.addMember(user: User, role?: Role)"},{"location":"triplydb-js/organization/#arguments","text":"The user argument has to be a user object of the user which should be added to the organization. The role argument can be either 'member' or 'owner' . If this argument is not specified, then 'member' is used as the default. 'member' A regular member that is allowed to read and write the datasets that are published under the organization. 'owner' An owner of the organization. Owners have all the rights of regular users, plus the ability to add/remove users to/from the organization, the ability to change the roles of existing users, and the ability to delete the organization.","title":"Arguments"},{"location":"triplydb-js/organization/#examples","text":"user The following snippet adds user John Doe to the Triply organization as a regular member. const organization = await triply.getOrganization('Triply') const johnDoe = await app.getUser('john-doe') await organization.addMember(johnDoe)","title":"Examples"},{"location":"triplydb-js/organization/#organizationremovememberuser-user","text":"Removes a member from the given Organization .","title":"Organization.removeMember(user: User)"},{"location":"triplydb-js/organization/#organizationaddqueryname-string-metadata-object","text":"Adds a new TriplyDB query to the current organization. 
Inherited from Account.addQuery(name: string, metadata: object) .","title":"Organization.addQuery(name: string, metadata: object)"},{"location":"triplydb-js/organization/#organizationensurestoryname-string-metadata-object","text":"Ensures the existence of a story with the given name and with the specified metadata . Inherited from Account.ensureStory(name: string, metadata: object) .","title":"Organization.ensureStory(name: string, metadata: object)"},{"location":"triplydb-js/organization/#organizationaddstoryname-string-metadata-object","text":"Adds a new TriplyDB story with the given name to the current organization. Inherited from Account.addStory(name: string, metadata?: object) .","title":"Organization.addStory(name: string, metadata?: object)"},{"location":"triplydb-js/organization/#organizationdelete","text":"Deletes this account. This also deletes all datasets, stories and queries that belong to this organization.","title":"Organization.delete()"},{"location":"triplydb-js/organization/#examples_1","text":"The following code example deletes the specified organization: const organization = await triply.getOrganization('Neo4j') await organization.delete()","title":"Examples"},{"location":"triplydb-js/organization/#organizationensuredatasetname-string-metadata-object","text":"Ensures the existence of a dataset with the given name and with the specified metadata . Inherited from Account.ensureDataset(name: string, metadata?: object) .","title":"Organization.ensureDataset(name: string, metadata?: object)"},{"location":"triplydb-js/organization/#organizationgetdatasetname-string","text":"Returns the dataset with the given name that is published by this organization. Inherited from Account.getDataset(name: string) .","title":"Organization.getDataset(name: string)"},{"location":"triplydb-js/organization/#organizationgetdatasets","text":"Returns an async iterator over the accessible datasets that belong to this organization. Inherited from Account.getDatasets() .","title":"Organization.getDatasets()"},{"location":"triplydb-js/organization/#organizationgetmembers","text":"Returns the list of memberships for the given organization.","title":"Organization.getMembers()"},{"location":"triplydb-js/organization/#return-type","text":"A membership contains the following components: role The role of the membership ( OrgRole ): either 'owner' for owners of the organization, or 'member' for regular members. The difference between owners and regular members is that owners can perform user management for the organization (add/remove/change memberships). user An instance of class User . createdAt A date/time string. updatedAt A date/time string.","title":"Return type"},{"location":"triplydb-js/organization/#examples_2","text":"const org = await triply.getOrganization('acme') for (const membership of await org.getMembers()) { console.log(user) }","title":"Examples"},{"location":"triplydb-js/organization/#see-also","text":"Memberships of organization are TriplyDB users .","title":"See also"},{"location":"triplydb-js/organization/#organizationgetpinneditems","text":"Returns the list of datasets, stories and queries that are pinned for the current organization. 
Inherited from Account.getPinnedItems() .","title":"Organization.getPinnedItems()"},{"location":"triplydb-js/organization/#organizationremovememberuser-user_1","text":"Removes the specified user from this organization.","title":"Organization.removeMember(user: User)"},{"location":"triplydb-js/organization/#arguments_1","text":"The user argument has to be a User object of a user.","title":"Arguments"},{"location":"triplydb-js/organization/#existence-considerations","text":"The user must be a current member of the organization for this method to succeed. If the user is not a current member of the organization, an error is thrown.","title":"Existence considerations"},{"location":"triplydb-js/organization/#examples_3","text":"The following snippet removes John Doe from the Triply organization, using a string argument: const organization = await triply.getOrganization('Triply') const johnDoe = await app.getUser('john-doe') await organization.removeMember(johnDoe) The following snippet removes John Doe from the Triply organization, using a User object: const organization = await triply.getOrganization('Triply') const user = await triply.getUser('john-doe') await organization.removeMember(user)","title":"Examples"},{"location":"triplydb-js/organization/#organizationsetavatarfile-string","text":"Sets a new image that characterized this organization. Inherited from Account.setAvatar(file: string) .","title":"Organization.setAvatar(file: string)"},{"location":"triplydb-js/organization/#organizationupdatemetadata-object","text":"Updates the metadata for this account. Inherited from Account.update(metadata: object) .","title":"Organization.update(metadata: object)"},{"location":"triplydb-js/query/","text":"On this page: Query Query.delete() Query.getInfo() Query.getString(apiVariables?: object) Examples Query.addVersion(metadata: object) Arguments Query.getRunLink() Query.results(apiVariables?: object, options?: object) Examples Query.update(metadata: object) Arguments Query.useVersion(version: number|'latest') Example Query.copy(queryName?: string, account?:object, metadataToReplace?: object) Arguments Example Query \u00b6 A Saved Query is a versioned SPARQL query with its own URL. Using this URL, users are able to view any version of the query and its results. It can also be used to run the query and retrieve the results from a browser or a program, removing the hassle of figuring out how to run a SPARQL query. Saved queries come with a RESTful API that can be configured with the use a SPARQL API variables. Query.delete() \u00b6 Permanently deletes this query and all of its versions. Query.getInfo() \u00b6 The returned dictionary object includes the following keys: accessLevel The access level of the query. The following values are possible: 'private' (default) The dataset can only be accessed by organization members. 'internal' The dataset can only be accessed by users that are logged into the TriplyDB server. 'public' The dataset can be accessed by everybody. autoselectService Whether the SPARQL service is automatically chosen ( true ), or whether a specific SPARQL service is configured ( false ). createdAt The date/time at which the query was created. dataset A dictionary object representing the dataset against which the query is evaluated. description The human-readable description of the query. This typically explains what the query does in natural language. displayName The human-readable name of the query. 
This name may include spaces and other characters that are not allowed in the URL-friendly name. name The URL-friendly name of the query that is used in URL paths. This name can only include ASCII letters and hyphens. numberOfVersions The number of currently stored versions of this query. owner A dictionary object representing the account (organization or user) to which the query belongs. \ud83d\udea7 link Stores part of the URL to run the query. Please use Query.getRunLink() to obtain the full URL to run the query. service The location of the SPARQL endpoint that is used to run the query. updatedAt The date/time at which the query was last modified. Query.getString(apiVariables?: object) \u00b6 Returns the query string of the current version of this query. Optionally, arguments can be specified for the API variables to this query. Examples \u00b6 The following code stores the SPARQL query string for the query object: const queryString = await query.getString() Query.addVersion(metadata: object) \u00b6 Adds a new version to the query used. It requires similar options to that of Query.addQuery . Arguments \u00b6 At least one of the following arguments is required to create a new version. Any argument not given will be copied from the previous version of that query. queryString: string the SPARQL compliant query as a string value output: string The visualization plugin that is used to display the result set. If none is set it defaults to 'table' . Other options may include: 'response' , 'geo' , 'gallery' , 'markup' , etc. Output will take priority over ldFrame ldFrame: object JSON LD frame object used to transform plain JSON LD into a framed JSON. Will be used only if an output is not provided. variables: Variable[] A list of objects with the following keys: IRI variable An object of the form `Variable` (see Account.addQuery() ) You can see how many versions exist on a query accessing Query.getInfo().numOfVersions You can use a specified version of a query accessing Query.useVersion(x: number) Query.getRunLink() \u00b6 Returns the URL link to run the query. It currently does not support the use of variables. Query.results(apiVariables?: object, options?: object) \u00b6 Query.results() function will automatically return all the results from a saved query. You can retrieve both results from a select or ask query and a construct or describe query. The results are returned as an async iterator . If there are more than 10 000 query results, they could be retrieved using pagination with TriplyDB.js . Examples \u00b6 Get the results of a query by setting a results variable. More specifically, for construct queries you use the statements() call: const triply = App.get({token: process.env.TOKEN}) const account = await triply.getAccount('account-name') const query = await account.getQuery('name-of-some-query') // For select queries you use the `statements()` call: const results = query.results().statements() // For select queries you use the `bindings()` call: const results = query.results().bindings() Additionally, saved queries can have 'API variables' that allow you to specify variables that are used in the query. Thus, if you have query parameters, pass their values as the first argument to results as follows: const triply = App.get({token: process.env.TOKEN}) const account = await triply.getAccount('account-name') const query = await account.getQuery('name-of-some-query') // For SPARQL construct queries. 
const results = query.results({ someVariable: 'value of someVariable', anotherVariable: 'value of anotherVariable' }).statements() // For SPARQL select queries. const results = query.results({ someVariable: 'value of someVariable', anotherVariable: 'value of anotherVariable' }).bindings() Query.update(metadata: object) \u00b6 Updates the metadata for the saved query. This does not result in a new query version. It requires similar options to that of Query.addQuery . Arguments \u00b6 At least one of the following arguments is required to update the metadata. Any argument given will be copied from the previous version of that query. accessLevel The access level of the query. The following values are possible: 'private' (default) The dataset can only be accessed by organization members. 'internal' The dataset can only be accessed by users that are logged into the TriplyDB server. 'public' The dataset can be accessed by everybody. autoselectService Whether the SPARQL service is automatically chosen ( true ), or whether a specific SPARQL service is configured ( false ). dataset A dictionary object representing the dataset against which the query is evaluated. description The human-readable description of the query. This typically explains what the query does in natural language. displayName The human-readable name of the query. This name may include spaces and other characters that are not allowed in the URL-friendly name. name The URL-friendly name of the query that is used in URL paths. This name can only include ASCII letters and hyphens. preferredService If the autoselectService is not selected the user can set the preferred service. Query.useVersion(version: number|'latest') \u00b6 A saved query is saved with a version number. Each time the query or the visualization changes the version number is incremented with one. When you want to retrieve a saved query with a particular version you need the useVersion function. The function returns the query object corresponding to that version of the query. If you want to use the latest version of the query you need to set the version argument to 'latest' . Example \u00b6 const user = await triply.getAccount('my-account') const query = await user.getQuery('my-query') const query_1 = await query.useVersion(1) Query.copy(queryName?: string, account?:object, metadataToReplace?: object) \u00b6 Copies a query using either the same name or a new name (if queryName is provided) to the current account or a new account (if accountName is provided) with the same metadata or overwritten metadata (if metadataToReplace is provided) Arguments \u00b6 queryName An optional parameter. The new URL-friendly name given to the duplicated query that is used in URL paths. This name can only include ASCII letters and hyphens. Defaults to the original query name. account An optional parameter.Expected to be either an User or an Organization object if provided. The new account to which the query will be copied to. Defaults to the current account metadataToReplace An optional metadata object with optionl properties that can be provided to override any of the existing metadata of the duplicated query if required accessLevel The access level of the query. The following values are possible: 'private' (default) The dataset can only be accessed by organization members. 'internal' The dataset can only be accessed by users that are logged into the TriplyDB server. 'public' The dataset can be accessed by everybody. 
queryString: string the SPARQL compliant query as a string value output: string The visualization plugin that is used to display the result set. If none is set it defaults to 'table' . Other options may include: 'response' , 'geo' , 'gallery' , 'markup' , etc dataset: object A dictionary object representing the dataset against which the query is evaluated. description: string The human-readable description of the query. This typically explains what the query does in natural language. displayName: string The human-readable name of the query. This name may include spaces and other characters that are not allowed in the URL-friendly name. variables: Variable[] A list of objects with the following keys: IRI variable An object of the form `Variable` (see Account.addQuery() ) serviceType: string (\"speedy\" | \"virtuoso\" | \"jena\" | \"blazegraph\" | \"prolog\") The SPARQL service type the duplicated query needs to be configured to Example \u00b6 const user = await triply.getAccount('my-account') const query = await user.getQuery('my-query') const query_1 = await query.useVersion(1) const orgAccount = await triply.getAccount('org-account'); // Within the same account under a new name const duplicatedQuery_1 = await query.copy('newDuplicateQuery') // To a new account with some metadata overwritten using the same query name const duplicatedQuery_2 = await query.copy(undefined, orgAccount , { description: 'newDescription', displayName: 'newDisplayName' })","title":"Query"},{"location":"triplydb-js/query/#query","text":"A Saved Query is a versioned SPARQL query with its own URL. Using this URL, users are able to view any version of the query and its results. It can also be used to run the query and retrieve the results from a browser or a program, removing the hassle of figuring out how to run a SPARQL query. Saved queries come with a RESTful API that can be configured with the use a SPARQL API variables.","title":"Query"},{"location":"triplydb-js/query/#querydelete","text":"Permanently deletes this query and all of its versions.","title":"Query.delete()"},{"location":"triplydb-js/query/#querygetinfo","text":"The returned dictionary object includes the following keys: accessLevel The access level of the query. The following values are possible: 'private' (default) The dataset can only be accessed by organization members. 'internal' The dataset can only be accessed by users that are logged into the TriplyDB server. 'public' The dataset can be accessed by everybody. autoselectService Whether the SPARQL service is automatically chosen ( true ), or whether a specific SPARQL service is configured ( false ). createdAt The date/time at which the query was created. dataset A dictionary object representing the dataset against which the query is evaluated. description The human-readable description of the query. This typically explains what the query does in natural language. displayName The human-readable name of the query. This name may include spaces and other characters that are not allowed in the URL-friendly name. name The URL-friendly name of the query that is used in URL paths. This name can only include ASCII letters and hyphens. numberOfVersions The number of currently stored versions of this query. owner A dictionary object representing the account (organization or user) to which the query belongs. \ud83d\udea7 link Stores part of the URL to run the query. Please use Query.getRunLink() to obtain the full URL to run the query. 
service The location of the SPARQL endpoint that is used to run the query. updatedAt The date/time at which the query was last modified.","title":"Query.getInfo()"},{"location":"triplydb-js/query/#querygetstringapivariables-object","text":"Returns the query string of the current version of this query. Optionally, arguments can be specified for the API variables to this query.","title":"Query.getString(apiVariables?: object)"},{"location":"triplydb-js/query/#examples","text":"The following code stores the SPARQL query string for the query object: const queryString = await query.getString()","title":"Examples"},{"location":"triplydb-js/query/#queryaddversionmetadata-object","text":"Adds a new version to the query used. It requires similar options to that of Query.addQuery .","title":"Query.addVersion(metadata: object)"},{"location":"triplydb-js/query/#arguments","text":"At least one of the following arguments is required to create a new version. Any argument not given will be copied from the previous version of that query. queryString: string the SPARQL compliant query as a string value output: string The visualization plugin that is used to display the result set. If none is set it defaults to 'table' . Other options may include: 'response' , 'geo' , 'gallery' , 'markup' , etc. Output will take priority over ldFrame ldFrame: object JSON LD frame object used to transform plain JSON LD into a framed JSON. Will be used only if an output is not provided. variables: Variable[] A list of objects with the following keys: IRI variable An object of the form `Variable` (see Account.addQuery() ) You can see how many versions exist on a query accessing Query.getInfo().numOfVersions You can use a specified version of a query accessing Query.useVersion(x: number)","title":"Arguments"},{"location":"triplydb-js/query/#querygetrunlink","text":"Returns the URL link to run the query. It currently does not support the use of variables.","title":"Query.getRunLink()"},{"location":"triplydb-js/query/#queryresultsapivariables-object-options-object","text":"Query.results() function will automatically return all the results from a saved query. You can retrieve both results from a select or ask query and a construct or describe query. The results are returned as an async iterator . If there are more than 10 000 query results, they could be retrieved using pagination with TriplyDB.js .","title":"Query.results(apiVariables?: object, options?: object)"},{"location":"triplydb-js/query/#examples_1","text":"Get the results of a query by setting a results variable. More specifically, for construct queries you use the statements() call: const triply = App.get({token: process.env.TOKEN}) const account = await triply.getAccount('account-name') const query = await account.getQuery('name-of-some-query') // For select queries you use the `statements()` call: const results = query.results().statements() // For select queries you use the `bindings()` call: const results = query.results().bindings() Additionally, saved queries can have 'API variables' that allow you to specify variables that are used in the query. Thus, if you have query parameters, pass their values as the first argument to results as follows: const triply = App.get({token: process.env.TOKEN}) const account = await triply.getAccount('account-name') const query = await account.getQuery('name-of-some-query') // For SPARQL construct queries. 
const results = query.results({ someVariable: 'value of someVariable', anotherVariable: 'value of anotherVariable' }).statements() // For SPARQL select queries. const results = query.results({ someVariable: 'value of someVariable', anotherVariable: 'value of anotherVariable' }).bindings()","title":"Examples"},{"location":"triplydb-js/query/#queryupdatemetadata-object","text":"Updates the metadata for the saved query. This does not result in a new query version. It requires similar options to that of Query.addQuery .","title":"Query.update(metadata: object)"},{"location":"triplydb-js/query/#arguments_1","text":"At least one of the following arguments is required to update the metadata. Any argument given will be copied from the previous version of that query. accessLevel The access level of the query. The following values are possible: 'private' (default) The dataset can only be accessed by organization members. 'internal' The dataset can only be accessed by users that are logged into the TriplyDB server. 'public' The dataset can be accessed by everybody. autoselectService Whether the SPARQL service is automatically chosen ( true ), or whether a specific SPARQL service is configured ( false ). dataset A dictionary object representing the dataset against which the query is evaluated. description The human-readable description of the query. This typically explains what the query does in natural language. displayName The human-readable name of the query. This name may include spaces and other characters that are not allowed in the URL-friendly name. name The URL-friendly name of the query that is used in URL paths. This name can only include ASCII letters and hyphens. preferredService If the autoselectService is not selected the user can set the preferred service.","title":"Arguments"},{"location":"triplydb-js/query/#queryuseversionversion-numberlatest","text":"A saved query is saved with a version number. Each time the query or the visualization changes the version number is incremented with one. When you want to retrieve a saved query with a particular version you need the useVersion function. The function returns the query object corresponding to that version of the query. If you want to use the latest version of the query you need to set the version argument to 'latest' .","title":"Query.useVersion(version: number|'latest')"},{"location":"triplydb-js/query/#example","text":"const user = await triply.getAccount('my-account') const query = await user.getQuery('my-query') const query_1 = await query.useVersion(1)","title":"Example"},{"location":"triplydb-js/query/#querycopyqueryname-string-accountobject-metadatatoreplace-object","text":"Copies a query using either the same name or a new name (if queryName is provided) to the current account or a new account (if accountName is provided) with the same metadata or overwritten metadata (if metadataToReplace is provided)","title":"Query.copy(queryName?: string, account?:object, metadataToReplace?: object)"},{"location":"triplydb-js/query/#arguments_2","text":"queryName An optional parameter. The new URL-friendly name given to the duplicated query that is used in URL paths. This name can only include ASCII letters and hyphens. Defaults to the original query name. account An optional parameter.Expected to be either an User or an Organization object if provided. The new account to which the query will be copied to. 
Defaults to the current account metadataToReplace An optional metadata object with optionl properties that can be provided to override any of the existing metadata of the duplicated query if required accessLevel The access level of the query. The following values are possible: 'private' (default) The dataset can only be accessed by organization members. 'internal' The dataset can only be accessed by users that are logged into the TriplyDB server. 'public' The dataset can be accessed by everybody. queryString: string the SPARQL compliant query as a string value output: string The visualization plugin that is used to display the result set. If none is set it defaults to 'table' . Other options may include: 'response' , 'geo' , 'gallery' , 'markup' , etc dataset: object A dictionary object representing the dataset against which the query is evaluated. description: string The human-readable description of the query. This typically explains what the query does in natural language. displayName: string The human-readable name of the query. This name may include spaces and other characters that are not allowed in the URL-friendly name. variables: Variable[] A list of objects with the following keys: IRI variable An object of the form `Variable` (see Account.addQuery() ) serviceType: string (\"speedy\" | \"virtuoso\" | \"jena\" | \"blazegraph\" | \"prolog\") The SPARQL service type the duplicated query needs to be configured to","title":"Arguments"},{"location":"triplydb-js/query/#example_1","text":"const user = await triply.getAccount('my-account') const query = await user.getQuery('my-query') const query_1 = await query.useVersion(1) const orgAccount = await triply.getAccount('org-account'); // Within the same account under a new name const duplicatedQuery_1 = await query.copy('newDuplicateQuery') // To a new account with some metadata overwritten using the same query name const duplicatedQuery_2 = await query.copy(undefined, orgAccount , { description: 'newDescription', displayName: 'newDisplayName' })","title":"Example"},{"location":"triplydb-js/service/","text":"On this page: Service Service.delete() Examples Service.getInfo() Examples Service.isUpToDate() Synchronization Examples Service.update( opts?) Examples Service.waitUntilRunning() Example Setting up index templates for ElasticSearch service Index templates Component templates Service \u00b6 Service objects describe specific functionalities that can be created over datasets in TriplyDB. Service objects are obtained through the the following methods: Dataset.addService Dataset.getServices A service always has one of the following statuses: Removing The service is being removed. Running The service is running normally. Starting The service is starting up. Stopped The services was stopped in the past. It cannot be used at the moment, but it can be enable again if needed. Stopping The service is currently being stopped. Service.delete() \u00b6 Permanently deletes this service. Examples \u00b6 const user = await triply.getAccount('my-account') const dataset = await user.getDataset('my-dataset') const service = await dataset.addService('my-service') await service.delete() Service.getInfo() \u00b6 Returns information about this service. Information is returned in a dictionary object. Individual keys can be accessed for specific information values. 
Examples \u00b6 The following snippet prints information about the newly created service (named my-service ): const account = await triply.getAccount() const dataset = await account.getDataset('my-dataset') const service = await dataset.addService('my-service') console.log(await service.getInfo()) Service.isUpToDate() \u00b6 Returns whether this service is synchronized with the dataset contents. Synchronization \u00b6 Because services must be explicitly synchronized in TriplyDB, it is possible to have services that expose an older version of the dataset and services that expose a newer version of the dataset running next to one another. There are two very common use cases for this: The production version of an application or website runs on an older service. The data does not change, so the application keeps working. The acceptance version of the same application or website runs on a newer service. Once the acceptance version is finished, it becomes the production version and a new service for the new acceptance version is created, etc. An old service is used by legacy software. New users are using the newer endpoint over the current version of the data, but a limited number of older users want to use the legacy version. Examples \u00b6 The following code checks whether a specific service is synchronized: const account = await triply.getAccount() const dataset = await account.getDataset('my-dataset') const service = await dataset.ensureService('my-service', {type: 'sparql'}) console.log(await service.isUpToDate()) Service.update(opts?) \u00b6 Synchronizes the service. Synchronization means that the data that is used in the service is made consistent with the data that is present in the graphs of the dataset. When one or more graphs are added or deleted, existing services keep exposing the old state of the data. The changes in the data are only exposed in the services after synchronization is performed. You can choose to perform a rolling update, during which a new service replaces the old one, in order to eliminate downtime. For a rolling update you should use the object {rollingUpdate:true} as opts . In case you want to track the progress of the rolling update, you can supply a logging function in opts , called onProgress . However, be aware that when using a rolling update, depending on the number of statements in your dataset, you might reach your instance's limit on the number of statements for services. In this case, you will not be able to perform a successful rolling update and you should switch to a regular update. Examples \u00b6 When there are multiple services, it is common to synchronize them all in sequence . This ensures that there are always one or more services available. This allows applications to use such services as their backend without any downtime during data changes. The following code synchronizes all services of a dataset in sequence: for (const service of await dataset.getServices()) { await service.update() } // For a rolling update for (const service of await dataset.getServices()) { await service.update({ rollingUpdate: true }) } Although less common, it is also possible to synchronize all services of a dataset in parallel . This is typically not used in production systems, where data changes must not result in any downtime. Still, parallel synchronization can be useful in development and/or acceptance environments. 
The following code synchronizes all services of a dataset in parallel: await Promise.all(dataset.getServices().map(service => service.update())) Service.waitUntilRunning() \u00b6 A service can be stopped or updated. The use of asynchronous code means that when a start command is given it takes a while before the service is ready for use. To make sure a service is available for querying you can user the function waitUntilRunning() to make sure that the script will wait until the service is ready for use. Example \u00b6 An example of a service being updated and afterwards a query needs to be executed: const triply = App.get({ token: process.env.TOKEN }) const user = await triply.getAccount() const dataset = await user.getDataset('some-dataset') const service = await dataset.getService('some-service') // starting a service but does not wait until it is started await service.start() // Function that checks if a service is available await service.waitUntilRunning() Setting up index templates for ElasticSearch service \u00b6 TriplyDB allows you to configure a custom mapping for Elasticsearch services in TriplyDB using index templates. Index templates \u00b6 Index templates make it possible to create indices with user defined configuration, which an index can then pull from. A template will be defined with a name pattern and some configuration in it. If the name of the index matches the template\u2019s naming pattern, the new index will be created with the configuration defined in the template. Official documentation from ElasticSearch on how to use Index templates can be found here . Index templates on TriplyDB can be configured through either TriplyDB API or TriplyDB.js. When creating a new service for the dataset, we add the config object to the metadata: Dataset.addService(\"SERVICE_NAME\", { type: \"elasticSearch\", config: { indexTemplates: [ { \"index_patterns\": \"index\", \"name\": \"TEMPLATE_NAME\", ... } ] } }) index_patterns and name are obligatory fields to include in the body of index template. It's important that every index template has the field index_patterns equal index ! Below is an example of creating an index template in TriplyDB-JS: import App from '@triply/triplydb/App.js' import dotenv from 'dotenv' dotenv.config() const app = App.get({ token: process.env.TRIPLYDB_TOKEN }) const account = await app.getAccount('ACCOUNT') const dataset = await account.getDataset('DATASET') await dataset.addService('SERVICE_NAME', { \"type\": \"elasticSearch\", \"config\": { \"indexTemplates\": [ { \"name\": \"TEMPLATE_NAME\", \"index_patterns\": \"index\" } ] } }) Component templates \u00b6 Component templates are building blocks for constructing index templates that specify index mappings, settings, and aliases. You can find the official documentation on their use in ElasticSearch here . They can be configured through either TriplyDB API or TriplyDB-JS . When creating a new service for the dataset, we add the config object to the metadata: Dataset.addService(\"SERVICE_NAME\", { type: \"elasticSearch\", config: { componentTemplates: [ { \"name\": \"TEMPLATE_NAME\", \"template\": { \"mappings\": { \"properties\": { ... } } }, ... } ] } }) name and template are obligatory fields to include in the body of component template. Component template can only be created together with an index template. In this case Index template needs to contain the field composed_of with the name of the component template. 
Below is an example of creating a component template for the property https://schema.org/dateCreated to be of type date . import App from '@triply/triplydb/App.js' import dotenv from 'dotenv' dotenv.config() const app = App.get({ token: process.env.TRIPLYDB_TOKEN }) const account = await app.getAccount('ACCOUNT') const dataset = await account.getDataset('DATASET') await dataset.addService('SERVICE_NAME', { \"type\": \"elasticSearch\", \"config\": { \"indexTemplates\": [ { \"name\": \"TEMPLATE_NAME\", \"index_patterns\": \"index\", \"composed_of\": [\"COMPONENT_TEMPLATE_NAME\"], } ], \"componentTemplates\": [ { \"name\": \"COMPONENT_TEMPLATE_NAME\", \"template\": { \"mappings\": { \"properties\": { \"https://schema.org/dateCreated\": { \"type\": \"date\" } } } } } ] } })","title":"Service"},{"location":"triplydb-js/service/#service","text":"Service objects describe specific functionalities that can be created over datasets in TriplyDB. Service objects are obtained through the following methods: Dataset.addService Dataset.getServices A service always has one of the following statuses: Removing The service is being removed. Running The service is running normally. Starting The service is starting up. Stopped The service was stopped in the past. It cannot be used at the moment, but it can be enabled again if needed. Stopping The service is currently being stopped.","title":"Service"},{"location":"triplydb-js/service/#servicedelete","text":"Permanently deletes this service.","title":"Service.delete()"},{"location":"triplydb-js/service/#examples","text":"const user = await triply.getAccount('my-account') const dataset = await user.getDataset('my-dataset') const service = await dataset.addService('my-service') await service.delete()","title":"Examples"},{"location":"triplydb-js/service/#servicegetinfo","text":"Returns information about this service. Information is returned in a dictionary object. Individual keys can be accessed for specific information values.","title":"Service.getInfo()"},{"location":"triplydb-js/service/#examples_1","text":"The following snippet prints information about the newly created service (named my-service ): const account = await triply.getAccount() const dataset = await account.getDataset('my-dataset') const service = await dataset.addService('my-service') console.log(await service.getInfo())","title":"Examples"},{"location":"triplydb-js/service/#serviceisuptodate","text":"Returns whether this service is synchronized with the dataset contents.","title":"Service.isUpToDate()"},{"location":"triplydb-js/service/#synchronization","text":"Because services must be explicitly synchronized in TriplyDB, it is possible to have services that expose an older version of the dataset and services that expose a newer version of the dataset running next to one another. There are two very common use cases for this: The production version of an application or website runs on an older service. The data does not change, so the application keeps working. The acceptance version of the same application or website runs on a newer service. Once the acceptance version is finished, it becomes the production version and a new service for the new acceptance version is created, etc. An old service is used by legacy software. 
New users are using the newer endpoint over the current version of the data, but a limited number of older users want to use the legacy version.","title":"Synchronization"},{"location":"triplydb-js/service/#examples_2","text":"The following code checks whether a specific service is synchronized: const account = await triply.getAccount() const dataset = await account.getDataset('my-dataset') const service = await dataset.ensureService('my-service', {type: 'sparql'}) console.log(await service.isUpToDate())","title":"Examples"},{"location":"triplydb-js/service/#serviceupdate-opts","text":"Synchronizes the service. Synchronization means that the data that is used in the service is made consistent with the data that is present in the graphs of the dataset. When one or more graphs are added or deleted, existing services keep exposing the old state of the data. The changes in the data are only exposed in the services after synchronization is performed. You can choose to perform a rolling update, during which a new service is replacing the old one, in order to eliminate downtime. For a rolling update you should use the object {rollingUpdate:true} as opts . In case you want to be able to track the progress of the rolling update, you can use a logging function in the second argument of opts , called onProgress . However, be aware that when using a rolling update, depending on your dataset statements, you might reach your instance's limit of statements' number for services. In this case, you will not be able to do a successful rolling update and you should switch to a regular update.","title":"Service.update( opts?)"},{"location":"triplydb-js/service/#examples_3","text":"When there are multiple services, it is common to synchronize them all in sequence . This ensures that there are always one or more services available. This allows applications to use such services as their backend without any downtime during data changes. The following code synchronizes all services of a dataset in sequence: for (const service of await dataset.getServices()) { service.update() } // For a rolling update for (const service of await dataset.getServices()) { service.update({ rollingUpdate: true }) } Although less common, it is also possible to synchronize all services of a dataset in parallel . This is typically not used in production systems, where data changes must not result in any downtime. Still, parallel synchronization can be useful in development and/or acceptance environments. The following code synchronizes all services of a dataset in parallel: await Promise.all(dataset.getServices().map(service => service.update()))","title":"Examples"},{"location":"triplydb-js/service/#servicewaituntilrunning","text":"A service can be stopped or updated. The use of asynchronous code means that when a start command is given it takes a while before the service is ready for use. 
To make sure a service is available for querying you can user the function waitUntilRunning() to make sure that the script will wait until the service is ready for use.","title":"Service.waitUntilRunning()"},{"location":"triplydb-js/service/#example","text":"An example of a service being updated and afterwards a query needs to be executed: const triply = App.get({ token: process.env.TOKEN }) const user = await triply.getAccount() const dataset = await user.getDataset('some-dataset') const service = await dataset.getService('some-service') // starting a service but does not wait until it is started await service.start() // Function that checks if a service is available await service.waitUntilRunning()","title":"Example"},{"location":"triplydb-js/service/#setting-up-index-templates-for-elasticsearch-service","text":"TriplyDB allows you to configure a custom mapping for Elasticsearch services in TriplyDB using index templates.","title":"Setting up index templates for ElasticSearch service"},{"location":"triplydb-js/service/#index-templates","text":"Index templates make it possible to create indices with user defined configuration, which an index can then pull from. A template will be defined with a name pattern and some configuration in it. If the name of the index matches the template\u2019s naming pattern, the new index will be created with the configuration defined in the template. Official documentation from ElasticSearch on how to use Index templates can be found here . Index templates on TriplyDB can be configured through either TriplyDB API or TriplyDB.js. When creating a new service for the dataset, we add the config object to the metadata: Dataset.addService(\"SERVICE_NAME\", { type: \"elasticSearch\", config: { indexTemplates: [ { \"index_patterns\": \"index\", \"name\": \"TEMPLATE_NAME\", ... } ] } }) index_patterns and name are obligatory fields to include in the body of index template. It's important that every index template has the field index_patterns equal index ! Below is an example of creating an index template in TriplyDB-JS: import App from '@triply/triplydb/App.js' import dotenv from 'dotenv' dotenv.config() const app = App.get({ token: process.env.TRIPLYDB_TOKEN }) const account = await app.getAccount('ACCOUNT') const dataset = await account.getDataset('DATASET') await dataset.addService('SERVICE_NAME', { \"type\": \"elasticSearch\", \"config\": { \"indexTemplates\": [ { \"name\": \"TEMPLATE_NAME\", \"index_patterns\": \"index\" } ] } })","title":"Index templates"},{"location":"triplydb-js/service/#component-templates","text":"Component templates are building blocks for constructing index templates that specify index mappings, settings, and aliases. You can find the official documentation on their use in ElasticSearch here . They can be configured through either TriplyDB API or TriplyDB-JS . When creating a new service for the dataset, we add the config object to the metadata: Dataset.addService(\"SERVICE_NAME\", { type: \"elasticSearch\", config: { componentTemplates: [ { \"name\": \"TEMPLATE_NAME\", \"template\": { \"mappings\": { \"properties\": { ... } } }, ... } ] } }) name and template are obligatory fields to include in the body of component template. Component template can only be created together with an index template. In this case Index template needs to contain the field composed_of with the name of the component template. Below is an example of creating a component template for the property https://schema.org/dateCreated to be of type date . 
import App from '@triply/triplydb/App.js' import dotenv from 'dotenv' dotenv.config() const app = App.get({ token: process.env.TRIPLYDB_TOKEN }) const account = await app.getAccount('ACCOUNT') const dataset = await account.getDataset('DATASET') await dataset.addService('SERVICE_NAME', { \"type\": \"elasticSearch\", \"config\": { \"indexTemplates\": [ { \"name\": \"TEMPLATE_NAME\", \"index_patterns\": \"index\", \"composed_of\": [\"COMPONENT_TEMPLATE_NAME\"], } ], \"componentTemplates\": [ { \"name\": \"COMPONENT_TEMPLATE_NAME\", \"template\": { \"mappings\": { \"properties\": { \"https://schema.org/dateCreated\": { \"type\": \"date\" } } } } } ] } })","title":"Component templates"},{"location":"triplydb-js/story/","text":"On this page: Story Story.delete() Examples Story.getInfo() Examples Story.setBanner(file: string) Examples Story \u00b6 A TriplyDB data story is a way of communicating information about your linked data along with explanatory text while also being able to integrate query results. To create data stories with TriplyDB.js, you can use the User.ensureStory or User.addStory functions. If you want to retrieve an already created data story, you can use the function User.getStories to iterate over all stories, or retrieve a particular one with User.getStory . Story objects are obtained through the following methods: User.addStory User.ensureStory User.getStories User.getStory Story.delete() \u00b6 Deletes this story. This deletes all paragraphs that belong to this story. This does not delete the queries that are linked into this story. If you also want to delete the queries, then this must be done with distinct calls of Query.delete() . Examples \u00b6 The following code example deletes a story called 'example-story' under the current user's account: const user = await triply.getUser() const story = await user.getStory('example-story') await story.delete() Story.getInfo() \u00b6 Returns information about this data story. Information is returned in a dictionary object. Individual keys can be accessed for specific information values. Examples \u00b6 The following snippet prints the paragraphs that appear in a data story: for (const element of (await story.getInfo()).content) { if (element.type === 'paragraph') { console.log(element.paragraph) } } Story.setBanner(file: string) \u00b6 Sets a new banner for the story. Examples \u00b6 The following snippet uploads the local image in file banner.webp and sets it as the banner image for the story: const user = await triply.getUser() const story = await user.getStory('example-story') await story.setBanner(\"banner.webp\");","title":"Story"},{"location":"triplydb-js/story/#story","text":"A TriplyDB data story is a way of communicating information about your linked data along with explanatory text while also being able to integrate query results. To create data stories with TriplyDB.js, you can use the User.ensureStory or User.addStory functions. If you want to retrieve an already created data story, you can use the function User.getStories to iterate over all stories, or retrieve a particular one with User.getStory . Story objects are obtained through the following methods: User.addStory User.ensureStory User.getStories User.getStory","title":"Story"},{"location":"triplydb-js/story/#storydelete","text":"Deletes this story. This deletes all paragraphs that belong to this story. This does not delete the queries that are linked into this story. 
If you also want to delete the queries, then this must be done with distinct calls of Query.delete() .","title":"Story.delete()"},{"location":"triplydb-js/story/#examples","text":"The following code example deletes a story called 'example-story' under the current user's account: const user = await triply.getUser() const story = await user.getStory('example-story') await story.delete()","title":"Examples"},{"location":"triplydb-js/story/#storygetinfo","text":"Returns information about this data story. Information is returned in a dictionary object. Individual keys can be accessed for specific information values.","title":"Story.getInfo()"},{"location":"triplydb-js/story/#examples_1","text":"The following snippet prints the paragraphs that appear in a data story: for (const element of (await story.getInfo()).content) { if ((element.type = 'paragraph')) { console.log(element.paragraph) } }","title":"Examples"},{"location":"triplydb-js/story/#storysetbannerfile-string","text":"Sets a new banner for the story.","title":"Story.setBanner(file: string)"},{"location":"triplydb-js/story/#examples_2","text":"The following snippet uploads the local image in file banner.webp and sets it as the banner image for the story: const user = await triply.getUser() const story = await user.getStory('example-story') await story.setBanner(\"banner.webp\");","title":"Examples"},{"location":"triplydb-js/user/","text":"On this page: User Obtaining instances Inheritance Limitations User.addDataset(name: string, metadata?: object) User.addQuery(metadata: object) User.ensureStory(name: string, metadata: object) User.addStory(name: string, metadata?: object) User.createOrganization(name: string, metadata?: object) Access restrictions Arguments Examples User.ensureDataset(name: string, metadata?: object) User.getDataset(name: string) User.getDatasets() User.getInfo() Examples User.getOrganizations() Order considerations Examples See also User.getPinnedItems() User.setAvatar(file: string) User.update(metadata: object) User \u00b6 Instances of class User denote users in TriplyDB. Obtaining instances \u00b6 Users are obtained with method App.getUser(name?: string) : const user = triply.getUser('john-doe') const user = triply.getUser() Alternatively, users are obtained by first obtaining an account ( App.getAccount(name?: string) ) and then casting it to a use ( Account.asUser() ): const account = await triply.getAccount('john-doe') const user = account.asUser() Inheritance \u00b6 User is a subclass of Account , from which it inherits most of its methods. Limitations \u00b6 Users cannot be created or deleted through the TriplyDB.js library. See the Triply Console documentation for how to create and delete users through the web-based GUI. User.addDataset(name: string, metadata?: object) \u00b6 Adds a new TriplyDB dataset with the given name to the current account. Inherited from Account.addDataset(name: string, metadata?: object) . User.addQuery(metadata: object) \u00b6 Adds a new TriplyDB query to the current user. Inherited from Account.addQuery(name:string, metadata: object) . User.ensureStory(name: string, metadata: object) \u00b6 Ensures the existence of a story with the given name and with the specified metadata . Inherited from Account.ensureStory(name: string, metadata: object) . User.addStory(name: string, metadata?: object) \u00b6 Adds a new TriplyDB story with the given name to the current user. Inherited from Account.addStory(name: string, metadata?: object) . 
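As a minimal sketch (not part of the official examples; the user, dataset, and story names and the access levels are illustrative), the creation methods above can be combined for a single user as follows:
const user = await triply.getUser('john-doe')
// Reuse the dataset if it already exists, otherwise create it.
const dataset = await user.ensureDataset('example-dataset', {accessLevel: 'private'})
// Reuse the story if it already exists, otherwise create it.
const story = await user.ensureStory('example-story', {accessLevel: 'private'})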
User.createOrganization(name: string, metadata?: object) \u00b6 Creates a new organization for which this user will be the owner. Access restrictions \u00b6 This method requires an API token with write access for this user. Arguments \u00b6 Argument name is the URL-friendly name of the new organization. This name can only contain alphanumeric characters and hyphens ( [A-Za-z0-9\\-] ). The optional metadata argument can be used to specify additional metadata. This is a dictionary object with the following optional keys: description The description of the organization. This description can make use of Markdown. email The email address at which the organization can be reached. name The human-readable name of the organization. This name may contain spaces and other non-alphanumeric characters. Examples \u00b6 The following snippet creates a new organization for which John Doe will be the owner. Notice that both a required URL-friendly name ( 'my-organization' ) and an optional display name ( 'My Organization' ) are specified. const user = await triply.getUser('john-doe') await user.createOrganization('my-organization', {name: 'My Organization'}) User.ensureDataset(name: string, metadata?: object) \u00b6 Ensures the existence of a dataset with the given name and with the specified metadata . Inherited from Account.ensureDataset(name: string, metadata?: object) . User.getDataset(name: string) \u00b6 Returns the TriplyDB dataset with the given name that is published by this user. Inherited from Account.getDataset(name: string) . User.getDatasets() \u00b6 Returns an async iterator over the accessible datasets for the current user. Inherited from Account.getDatasets() . User.getInfo() \u00b6 Returns information about this user. Information is returned in a dictionary object. Individual keys can be accessed for specific information values. The information object for users includes the following keys: avatarUrl A URL to the user image. accountName The URL-friendly name of the user. name The human-readable display name of the user. description The human-readable description of the user. createdAt The date and time on which the user was created. datasetCount The number of datasets for the user. queryCount The number of queries for the user. storyCount The number of stories for the user. pinnedItems An array containing the pinned items (datasets, stories and queries) for the user. role The role of the user. Either 'light', 'regular' or 'siteAdmin'. orgs An array of organizations of which the user is a member. Email address The email address of the user. updatedAt The date and time on which the user was last updated. lastActivity The date and time on which the user was last online on TriplyDB. Examples \u00b6 The following snippet prints an overview of the account that is associated with the API token that is used: const user = await triply.getUser() console.log(await user.getInfo()) User.getOrganizations() \u00b6 Returns an async iterator over the organizations that this user is a member of. Order considerations \u00b6 The order in the list reflects the order in which the organizations appear on the user page in the Triply GUI. Examples \u00b6 The following snippet prints the list of organizations that John Doe is a member of: const user = await triply.getUser('john-doe') for await (const organization of await user.getOrganizations()) { console.log((await organization.getInfo()).name) } See also \u00b6 The async iterator contains organization objects. 
See the section about the Organization class for methods that can be used on such objects. User.getPinnedItems() \u00b6 Returns the list of datasets, stories and queries that are pinned for the current user. Inherited from Account.getPinnedItems() . User.setAvatar(file: string) \u00b6 Sets a new image that characterized this user. Inherited from Account.setAvatar(file: string) . User.update(metadata: object) \u00b6 Updates the metadata for this user. Inherited from Account.update(metadata: object) .","title":"User"},{"location":"triplydb-js/user/#user","text":"Instances of class User denote users in TriplyDB.","title":"User"},{"location":"triplydb-js/user/#obtaining-instances","text":"Users are obtained with method App.getUser(name?: string) : const user = triply.getUser('john-doe') const user = triply.getUser() Alternatively, users are obtained by first obtaining an account ( App.getAccount(name?: string) ) and then casting it to a use ( Account.asUser() ): const account = await triply.getAccount('john-doe') const user = account.asUser()","title":"Obtaining instances"},{"location":"triplydb-js/user/#inheritance","text":"User is a subclass of Account , from which it inherits most of its methods.","title":"Inheritance"},{"location":"triplydb-js/user/#limitations","text":"Users cannot be created or deleted through the TriplyDB.js library. See the Triply Console documentation for how to create and delete users through the web-based GUI.","title":"Limitations"},{"location":"triplydb-js/user/#useradddatasetname-string-metadata-object","text":"Adds a new TriplyDB dataset with the given name to the current account. Inherited from Account.addDataset(name: string, metadata?: object) .","title":"User.addDataset(name: string, metadata?: object)"},{"location":"triplydb-js/user/#useraddquerymetadata-object","text":"Adds a new TriplyDB query to the current user. Inherited from Account.addQuery(name:string, metadata: object) .","title":"User.addQuery(metadata: object)"},{"location":"triplydb-js/user/#userensurestoryname-string-metadata-object","text":"Ensures the existence of a story with the given name and with the specified metadata . Inherited from Account.ensureStory(name: string, metadata: object) .","title":"User.ensureStory(name: string, metadata: object)"},{"location":"triplydb-js/user/#useraddstoryname-string-metadata-object","text":"Adds a new TriplyDB story with the given name to the current user. Inherited from Account.addStory(name: string, metadata?: object) .","title":"User.addStory(name: string, metadata?: object)"},{"location":"triplydb-js/user/#usercreateorganizationname-string-metadata-object","text":"Creates a new organization for which this user will be the owner.","title":"User.createOrganization(name: string, metadata?: object)"},{"location":"triplydb-js/user/#access-restrictions","text":"This method requires an API token with write access for this user.","title":"Access restrictions"},{"location":"triplydb-js/user/#arguments","text":"Argument name is the URL-friendly name of the new organization. This name can only contain alphanumeric characters and hyphens ( [A-Za-z0-9\\-] ). The optional metadata argument can be used to specify additional metadata. This is a dictionary object with the following optional keys: description The description of the organization. This description can make use of Markdown. email The email address at which the organization can be reached. name The human-readable name of the organization. 
This name may contain spaces and other non-alphanumeric characters.","title":"Arguments"},{"location":"triplydb-js/user/#examples","text":"The following snippet creates a new organization for which John Doe will be the owner. Notice that both a required URL-friendly name ( 'my-organization' ) and an optional display name ( 'My Organization' ) are specified. const user = await triply.getUser('john-doe') await user.createOrganization(my-organization, {name: 'My Organization'}))","title":"Examples"},{"location":"triplydb-js/user/#userensuredatasetname-string-metadata-object","text":"Ensures the existence of a dataset with the given name and with the specified metadata . Inherited from Account.ensureDataset(name: string, metadata?: object) .","title":"User.ensureDataset(name: string, metadata?: object)"},{"location":"triplydb-js/user/#usergetdatasetname-string","text":"Returns the TriplyDB dataset with the given name that is published by this user. Inherited from Account.getDataset(name: string) .","title":"User.getDataset(name: string)"},{"location":"triplydb-js/user/#usergetdatasets","text":"Returns an async iterator over the accessible datasets for the current user. Inherited from Account.getDatasets() .","title":"User.getDatasets()"},{"location":"triplydb-js/user/#usergetinfo","text":"Returns information about this user. Information is returned in a dictionary object. Individual keys can be accessed for specific information values. The information object for users includes the following keys: avatarUrl A URL to the user image. accountName The URL-friendly name of the user. name The human-readable display name of the user description The human-readable description of the user. createdAt The date and time on which the user was created. datasetCount The number of datasets for the user. queryCount The number of queries for the user. storyCount The number of stories for the user pinnedItems An array containing the pinned items (datasets, stories and queries) for the user. role The role of the user. Either 'light', 'regular' or 'siteAdmin'. orgs An array of organizations of which the user is a member. Email address The email address of the user. updatedAt The date and time on which the user was last updated. lastActivity The date and time on which the user was last online on TriplyDB.","title":"User.getInfo()"},{"location":"triplydb-js/user/#examples_1","text":"The following snippet prints an overview of account that is associated with the used API token: const user = await triply.getUser() console.log(await user.getInfo())","title":"Examples"},{"location":"triplydb-js/user/#usergetorganizations","text":"Returns an async iterator over the organizations that this user is a member of.","title":"User.getOrganizations()"},{"location":"triplydb-js/user/#order-considerations","text":"The order in the list reflects the order in which the organizations appear on the user page in the Triply GUI.","title":"Order considerations"},{"location":"triplydb-js/user/#examples_2","text":"The following snippet prints the list of organizations that John Doe is a member of: const user = await triply.getUser('john-doe') for await (const organization of await user.getOrganizations()) { console.log((await organization.getInfo()).name) }","title":"Examples"},{"location":"triplydb-js/user/#see-also","text":"The async iterator contains organization objects. 
See the section about the Organization class for methods that can be used on such objects.","title":"See also"},{"location":"triplydb-js/user/#usergetpinneditems","text":"Returns the list of datasets, stories and queries that are pinned for the current user. Inherited from Account.getPinnedItems() .","title":"User.getPinnedItems()"},{"location":"triplydb-js/user/#usersetavatarfile-string","text":"Sets a new image that characterized this user. Inherited from Account.setAvatar(file: string) .","title":"User.setAvatar(file: string)"},{"location":"triplydb-js/user/#userupdatemetadata-object","text":"Updates the metadata for this user. Inherited from Account.update(metadata: object) .","title":"User.update(metadata: object)"},{"location":"triplydb-technical-changelog/","text":"On this page: 24.12.200 24.12.104 24.11.200 24.11.100 24.10.200 24.10.100 24.9.200 24.9.100 24.08.1 24.08.0 24.07.1 24.07.0 SAML 24.06.1 24.06.0 Version tags This changelog covers technical changes related to TriplyDB on-premise deployments. See here for the TriplyDB changelog that is user facing. This intent of this changelog is primarily for documenting breaking changes or changes that are useful to know when deploying/upgrading TriplyDB. 24.12.200 \u00b6 Release date: 2024-12-18 None 24.12.104 \u00b6 Release date: 2024-12-06 The .disableNetworkPolicies property is removed. Instead, use the networkPolicies.enabled property. The API and console apply stricter network policies. As a consequence, you will need to specify a source selector that references your ingress. See the kubernetes documentation for more info on such selectors. Specify this selector in networkPolicies.ingressSelector . An example definition is the following: networkPolicies: ingressSelector: namespaceSelector: matchLabels: kubernetes.io/metadata.name: ingress-nginx 24.11.200 \u00b6 Release date: 2024-11-22 None 24.11.100 \u00b6 Release date: 2024-11-08 None 24.10.200 \u00b6 Release date: 2024-10-25 The .defaultImageRegistry and triplydbImageRegistry fields are now removed. If you used these fields, then you should instead reference the full image path (without the tag) for the images. If you used .defaultImageRegistry with a custom registry, then set the full image tags for these keys: mongodb.image redis.image kubernetesWaitFor.image If you used .triplydbImageRegistry with a custom registry, then set the full image tags for these keys: api.image console.image orchestrator.image indexJobs.image queryJobs.image tdbServices.virtuoso.image tdbServices.jena.image tdbServices.elastic.image tdbServices.blazegraph.image 24.10.100 \u00b6 Release date: 2024-10-11 None 24.9.200 \u00b6 Release date: 2024-09-27 None 24.9.100 \u00b6 The version scheme for the TriplyDB helm charts changed. This is now reflected in this technical changelog. indexingChunkSize (if present) should move to indexJobs.chunkSize queryJobs.nodeMemoryLimitInGb is renamed to queryJobs.nodejsMemoryLimitInGb 24.08.1 \u00b6 None 24.08.0 \u00b6 The service-orchestrator key is renamed to orchestrator 24.07.1 \u00b6 None 24.07.0 \u00b6 SAML \u00b6 SAML configurations changed as follows: api.env.TRIPLY__SAML__0__SIGNING_CERT should be renamed to TRIPLY__SAML__0__PUBLIC_CERT api.env.TRIPLY__SAML__0__CERT should be renamed to TRIPLY__SAML__0__IDP_CERT 24.06.1 \u00b6 None 24.06.0 \u00b6 Version tags \u00b6 Version tags changed. If you have hardcoded versions in your values file (e.g. api.version or console.version ), then remove the -k8 postfix. 
E.g., change 24.05.1-k8 to 24.05.1 The tdbServices.[service-name].tag property changed to tdbServices.[service-name].version","title":"TriplyDB "},{"location":"triplydb-technical-changelog/#24.12.200","text":"Release date: 2024-12-18 None","title":"24.12.200"},{"location":"triplydb-technical-changelog/#24.12.104","text":"Release date: 2024-12-06 The .disableNetworkPolicies property is removed. Instead, use the networkPolicies.enabled property. The API and console apply stricter network policies. As a consequence, you will need to specify a source selector that references your ingress. See the kubernetes documentation for more info on such selectors. Specify this selector in networkPolicies.ingressSelector . An example definition is the following: networkPolicies: ingressSelector: namespaceSelector: matchLabels: kubernetes.io/metadata.name: ingress-nginx","title":"24.12.104"},{"location":"triplydb-technical-changelog/#24.11.200","text":"Release date: 2024-11-22 None","title":"24.11.200"},{"location":"triplydb-technical-changelog/#24.11.100","text":"Release date: 2024-11-08 None","title":"24.11.100"},{"location":"triplydb-technical-changelog/#24.10.200","text":"Release date: 2024-10-25 The .defaultImageRegistry and triplydbImageRegistry fields are now removed. If you used these fields, then you should instead reference the full image path (without the tag) for the images. If you used .defaultImageRegistry with a custom registry, then set the full image tags for these keys: mongodb.image redis.image kubernetesWaitFor.image If you used .triplydbImageRegistry with a custom registry, then set the full image tags for these keys: api.image console.image orchestrator.image indexJobs.image queryJobs.image tdbServices.virtuoso.image tdbServices.jena.image tdbServices.elastic.image tdbServices.blazegraph.image","title":"24.10.200"},{"location":"triplydb-technical-changelog/#24.10.100","text":"Release date: 2024-10-11 None","title":"24.10.100"},{"location":"triplydb-technical-changelog/#24.9.200","text":"Release date: 2024-09-27 None","title":"24.9.200"},{"location":"triplydb-technical-changelog/#24.9.100","text":"The version scheme for the TriplyDB helm charts changed. This is now reflected in this technical changelog. indexingChunkSize (if present) should move to indexJobs.chunkSize queryJobs.nodeMemoryLimitInGb is renamed to queryJobs.nodejsMemoryLimitInGb","title":"24.9.100"},{"location":"triplydb-technical-changelog/#24.08.1","text":"None","title":"24.08.1"},{"location":"triplydb-technical-changelog/#24.08.0","text":"The service-orchestrator key is renamed to orchestrator","title":"24.08.0"},{"location":"triplydb-technical-changelog/#24.07.1","text":"None","title":"24.07.1"},{"location":"triplydb-technical-changelog/#24.07.0","text":"","title":"24.07.0"},{"location":"triplydb-technical-changelog/#saml","text":"SAML configurations changed as follows: api.env.TRIPLY__SAML__0__SIGNING_CERT should be renamed to TRIPLY__SAML__0__PUBLIC_CERT api.env.TRIPLY__SAML__0__CERT should be renamed to TRIPLY__SAML__0__IDP_CERT","title":"SAML"},{"location":"triplydb-technical-changelog/#24.06.1","text":"None","title":"24.06.1"},{"location":"triplydb-technical-changelog/#24.06.0","text":"","title":"24.06.0"},{"location":"triplydb-technical-changelog/#version-tags","text":"Version tags changed. If you have hardcoded versions in your values file (e.g. api.version or console.version ), then remove the -k8 postfix. 
E.g., change 24.05.1-k8 to 24.05.1 The tdbServices.[service-name].tag property changed to tdbServices.[service-name].version","title":"Version tags"},{"location":"tutorial/building-a-restful-api/","text":"Building a RESTful API \u00b6 Write the Iterator \u00b6 The iterator is a SPARQL Select query that returns a sequence of bindings that adheres to the query parameters supported for the REST API path. For example, a REST API path for occupations returns bindings for individual occupations, and has the following query parameters: The name of the occupation (variable ?name ) Configuration in case no query parameter is specified \u00b6 The first part of the query string returns the sequence of bindings in case no query parameter is specified. It includes the following things: - Recommended: a graph-clause that scopes to the instance data graph. - The class that corresponds to the REST API path (example: cnluwv:Occupation ). prefix cnluwv: select $this { $this a cnluwv:Occupation. } Configuration for query parameters that map onto required properties \u00b6 For every query parameter, determine whether it is mapped onto a property that is required or optional. In our example, query parameter ?name maps onto property skosxl:prefLabel , which is required for every occupation. For each required property, add a Basic Graph Pattern: prefix cnluwv: prefix skosxl: select $this { $this a cnluwv:Occupation; skosxl:prefLabel/skosxl:literalForm $name. } Configuration for query parameters that map onto optional properties \u00b6 For every query parameter that maps onto a property that is optional, add the following 3-line template: bind(?ApiVar as ?ApiVar1) optional { $this PROPERTY_PATH ?ApiVar2. } filter(!bound(?ApiVar1) || ?ApiVar1 = ?ApiVar2) We explain each line in detail: The Bind clause makes sure that ?ApiVar1 is only bound if the API variable is specified through a query parameter. The Optional clause makes sure that we match the optional property, if it is present. The Filter clause ensures that the query succeeds if either the query parameter was not set, or if the query parameter was set to the value of the current binding for $this . prefix cnluwv: prefix skosxl: select $this { $this a cnluwv:Occupation; skosxl:prefLabel/skosxl:literalForm $name. bind(?ApiVar as ?ApiVar1) optional { $this PROPERTY_PATH ?ApiVar2. } filter(!bound(?ApiVar1) || ?ApiVar1 = ?ApiVar2) } Full example \u00b6 The following query is an example of an Iterator: prefix cnluwv: prefix skosxl: select $this { $this a cnluwv:Occupation; skosxl:prefLabel/skosxl:literalForm $name } Write the Generator \u00b6 The Generator is a SPARQL Construct query that returns one record for each binding of the $this variable in our Iterator. Since we are working with linked data graphs, there is no real notion of a 'record'. This requires us to configure which data items we want to include, and which we want to exclude. The basic Construct query \u00b6 Since our Generator will be a Construct query, we can start out with the following basic query, which returns triples from our graph: construct { $this ?p1 ?o1. } where { $this ?p1 ?o1. } We have used the same variable name $this as in our Iterator, but this is merely a naming convention. We must do some extra work to connect the bindings from our Iterator to our Generator... Integrate the Iterator into the Generator \u00b6 In SPARQL, we can integrate any Select query into what is called a Sub-Select clause.
This allows us to connect the bindings for $this that come out of our Iterator to the basic template in our Generator : prefix cnluwv: prefix skosxl: construct { $this ?p1 ?o1. } where { { select $this { $this a cnluwv:Occupation; skosxl:prefLabel/skosxl:literalForm ?name } } $this ?p1 ?o1. } If we specify a query parameter for ?name , we get back the triples that describe the occupation with that name (Basic Triple Pattern $this ?p1 ?o1 ). Extend the Generator with nested information \u00b6 In the previous section , we only returned the triples that directly describe the bindings for $this . However, some relevant information may appear in triples that are further removed from $this . For example, SKOS-XL labels use an extra level of nesting, where the actual label content appears two hops away from $this . If we want to include such nested information into our Generator, we must specify this with additional Basic Triple Patterns. Since only some properties use nesting, we must typically enclose deeper hops inside an Optional clause, together with either a whitelist of properties we want to follow, or a blacklist of properties we do not want to follow. In the following example, we use a whitelist to include properties whose textual content is found one nesting level deeper: prefix cnluwv: prefix skosxl: construct { ?this ?p1 ?o1. ?o1 ?p2 ?o2. } where { { select $this { $this a cnluwv:Occupation; skosxl:prefLabel/skosxl:literalForm $name. } } $this ?p1 ?o1. optional { ?o1 ?p2 ?o2. filter(?p1 in (cnluwv:hasContentStatus, cnluwv:hasDetailedDescription, skosxl:altLabel, skosxl:prefLabel)) } } Notice that the whitelist is implemented with operator in ; blacklists are similarly implemented with operator not in . Full example \u00b6 The following query is an example of a working Generator query: prefix cnluwv: prefix skosxl: construct { ?this ?p1 ?o1. ?o1 ?p2 ?o2. } where { { select $this { $this a cnluwv:Occupation; skosxl:prefLabel/skosxl:literalForm $name. } } $this ?p1 ?o1. optional { ?o1 ?p2 ?o2. filter(?p1 in (cnluwv:hasContentStatus, cnluwv:hasDetailedDescription, skosxl:altLabel, skosxl:prefLabel)) } } Returning JSON \u00b6 From a content perspective, our Generator query functions like a REST API path: we can set zero or more query parameters, and we receive the information that conforms to our configuration. By default, the Generator returns various RDF serializations, such as JSON-LD. While the JSON body contains all the relevant information, the syntactic structure of the body looks quite messy. Adding a JSON-LD Frame \u00b6 In order to better structure the syntax of the returned JSON body, we make use of a JSON-LD Frame. We start out with the empty frame, and will build this out in subsequent steps: {} You can try this out by going to the following query: link In fact, it is best to keep this query open in a separate window or tab, and apply each of the following steps yourself, to see the live effects of changing the JSON-LD Frame configuration. Configure the type \u00b6 We want the JSON objects to describe information of a specific type. In our example, each object should describe an occupation. We can configure this in the JSON-LD Frame by using the \"@type\" key, together with the IRI of the occupation class: { \"@type\": \"https://linkeddata.uwv.nl/ns/competentnl_uwv#Occupation\" } We now see that the JSON data starts to form around the occupation node. Configure the context \u00b6 The JSON object that describes an occupation contains a lot of confusing syntax and lengthy IRIs.
The JSON-LD standard allows us to clean this up through a piece of configuration called the Context. The context is typically the same for all objects that are returned by an API. The context is itself a JSON object that is specified under the \"@context\" key. We start by including the following sub-keys: \"@base\" configures the IRI namespace of the instances. \"@version\" indicates which JSON-LD version we use. \"@vocab\" configures the IRI namespace of the main vocabulary that is used. We can now abbreviate the configuration for \"@type\" to \"Occupation\" . This will make use of the occupation class within the IRI namespace that is specified under \"@vocab\" : { \"@context\": { \"@base\": \"https://linkeddata.uwv.nl/id/\", \"@version\": 1.1, \"@vocab\": \"https://linkeddata.uwv.nl/ns/competentnl_uwv#\" }, \"@type\": \"Occupation\" } Add IRI prefix declarations \u00b6 We can add IRI prefix declarations to the Context. This results in shorter keys and values in the JSON objects: { \"@context\": { \"cnl\": \"https://linkeddata.uwv.nl/ns/competentnl_uwv#\", \"dct\": \"http://purl.org/dc/terms/\", \"rdfs\": \"http://www.w3.org/2000/01/rdf-schema#\", \"skos\": \"http://www.w3.org/2004/02/skos/core#\", \"skosxl\": \"http://www.w3.org/2008/05/skos-xl#\", \"xsd\": \"http://www.w3.org/2001/XMLSchema#\", ... } } Configure key names \u00b6 While adding IRI prefix declarations makes many keys and values shorter, we can go one step further and use completely different key names that map onto IRIs. This allows us to add keys in a different language (e.g. in Dutch), or it allows us to get rid of the IRI alias that was still included after adding IRI prefix declarations. Furthermore, we can introduce our own names for the somewhat awkward-looking keys \"@id\" and \"@type\" . The following Context results in keys that consist of simple names, devoid of any (abbreviated or full) IRIs, and devoid of strange @-signs (except for the Context key, which cannot be renamed): { \"@context\": { \"@base\": \"https://linkeddata.uwv.nl/id/\", \"@version\": 1.1, \"@vocab\": \"https://linkeddata.uwv.nl/ns/competentnl_uwv#\", \"altLabel\": \"skosxl:altLabel\", \"cnl\": \"https://linkeddata.uwv.nl/ns/competentnl_uwv#\", \"broadMatch\": \"skos:broadMatch\", \"created\": \"dct:created\", \"dct\": \"http://purl.org/dc/terms/\", \"id\": \"@id\", \"inScheme\": \"skos:inScheme\", \"rdfs\": \"http://www.w3.org/2000/01/rdf-schema#\", \"relatedMatch\": \"skos:relatedMatch\", \"skos\": \"http://www.w3.org/2004/02/skos/core#\", \"skosxl\": \"http://www.w3.org/2008/05/skos-xl#\", \"type\": \"@type\", \"xsd\": \"http://www.w3.org/2001/XMLSchema#\" }, \"type\": \"Occupation\" } Configure datatypes \u00b6 We still have too much syntactic clutter for values with a datatype. For example, this is how a created date is shown: { \"@context\": { ... }, ..., \"created\": { \"type\": \"xsd:dateTime\", \"@value\": \"2024-12-09T00:00:00\" }, ... } Again, we can use the Context to hide unnecessary details from the JSON object. The following entry specifies that the datatype of 'created' values is XML Schema Datatypes (XSD) date/time: { \"@context\": { ..., \"created\": { \"@id\": \"dct:created\", \"@type\": \"xsd:dateTime\" }, ... } } Configure languages \u00b6 We still have too much syntactic clutter for values with a language tag. For example, this is how a literal form is shown: { \"@context\": { ... }, ..., \"literalForm\": { \"@language\": \"nl\", \"@value\": \"medewerker archief\" }, ...
} Again, we can use the Context to hide unnecessary details from the JSON object. The following entry specifies that the language of 'literalForm' values is Dutch ('nl'): { \"@context\": { ..., \"literalForm\": { \"@id\": \"skosxl:literalForm\", \"@language\": \"nl\" }, ... } } Scoped contexts \u00b6 { \"@context\": { ..., \"altLabel\": { \"@id\": \"skosxl:altLabel\", \"@context\": { \"literalForm\": { \"@id\": \"skosxl:literalForm\", \"@language\": \"nl\" } } }, ... }, ... } Using the RESTful API \u00b6 Once a couple of SPARQL queries have been specified, it is possible to use the REST API through an OpenAPI Specification. This is done by following these steps: Create an API Token in the Triply GUI. Go to an HTTP client, and configure the API Token as an HTTP Bearer Token. Specify the standard Accept header for YAML, the format used by the OpenAPI Specification: text/vnd.yaml Perform an HTTPS request against URL https://${host}/queries/${account}/ , where you enter the host name of your Triply environment and the name of the account under which the queries are stored. This downloads the OpenAPI Specification that contains metadata about all queries under the specified account. If you want to retrieve the metadata for one specific query version, change the URL in item 4 to https://${host}/queries/${account}/${query}/${version} Load the OpenAPI Specification YAML file into an HTTP client. With the same API Token configured, you can now easily submit queries to the various REST paths.","title":"Building a RESTful API"},{"location":"tutorial/building-a-restful-api/#building-a-restful-api","text":"","title":"Building a RESTful API"},{"location":"tutorial/building-a-restful-api/#write-the-iterator","text":"The iterator is a SPARQL Select query that returns a sequence of bindings that adheres to the query parameters supported for the REST API path. For example, a REST API path for occupations returns bindings for individual occupations, and has the following query parameters: The name of the occupation (variable ?name )","title":"Write the Iterator"},{"location":"tutorial/building-a-restful-api/#configuration-in-case-no-query-parameter-is-specified","text":"The first part of the query string returns the sequence of bindings in case no query parameter is specified. It includes the following things: - Recommended: a graph-clause that scopes to the instance data graph. - The class that corresponds to the REST API path (example: cnluwv:Occupation ). prefix cnluwv: select $this { $this a cnluwv:Occupation. }","title":"Configuration in case no query parameter is specified"},{"location":"tutorial/building-a-restful-api/#configuration-for-query-parameters-that-map-onto-required-properties","text":"For every query parameter, determine whether it is mapped onto a property that is required or optional. In our example, query parameter ?name maps onto property skosxl:prefLabel , which is required for every occupation. For each required property, add a Basic Graph Pattern: prefix cnluwv: prefix skosxl: select $this { $this a cnluwv:Occupation; skosxl:prefLabel/skosxl:literalForm $name. }","title":"Configuration for query parameters that map onto required properties"},{"location":"tutorial/building-a-restful-api/#configuration-for-query-parameters-that-map-onto-optional-properties","text":"For every query parameter that maps onto a property that is optional, add the following 3-line template: bind(?ApiVar as ?ApiVar1) optional { $this PROPERTY_PATH ?ApiVar2.
} filter(!bound(?ApiVar1) || ?ApiVar1 = ?ApiVar2) We explain each line in detail: The Bind clause makes sure that ?ApiVar1 is only bound if the API variable is specified through a query parameter. The Optional clause makes sure that we match the optional property, if it is present. The Filter clause ensures that the query succeeds if either the query parameter was not set, or if the query parameter was set to the value of the current binding for $this . prefix cnluwv: prefix skosxl: select $this { $this a cnluwv:Occupation; skosxl:prefLabel/skosxl:literalForm $name. bind(?ApiVar as ?ApiVar1) optional { $this PROPERTY_PATH ?ApiVar2. } filter(!bound(?ApiVar1) || ?ApiVar1 = ?ApiVar2) }","title":"Configuration for query parameters that map onto optional properties"},{"location":"tutorial/building-a-restful-api/#full-example","text":"The following query is an example of an Iterator: prefix cnluwv: prefix skosxl: select $this { $this a cnluwv:Occupation; skosxl:prefLabel/skosxl:literalForm $name }","title":"Full example"},{"location":"tutorial/building-a-restful-api/#write-the-generator","text":"The Generator is a SPARQL Construct query that returns one record for each binding of the $this variable in our Iterator. Since we are working with linked data graphs, there is no real notion of a 'record'. This requires us to configure which data items we want to include, and which we want to exclude.","title":"Write the Generator"},{"location":"tutorial/building-a-restful-api/#the-basic-construct-query","text":"Since our Generator will be a Construct query, we can start out with the following basic query, which returns triples from our graph: construct { $this ?p1 ?o1. } where { $this ?p1 ?o1. } We have used the same variable name $this as in our Iterator, but this is merely a naming convention. We must do some extra work to connect the bindings from our Iterator to our Generator...","title":"The basic Construct query"},{"location":"tutorial/building-a-restful-api/#integrate-the-iterator-into-the-generator","text":"In SPARQL, we can integrate any Select query into what is called a Sub-Select clause. This allows us to connect the bindings for $this that come out of our Iterator to the basic template in our Generator : prefix cnluwv: prefix skosxl: construct { $this ?p1 ?o1. } where { { select $this { $this a cnluwv:Occupation; skosxl:prefLabel/skosxl:literalForm ?name } } $this ?p1 ?o1. } If we specify a query parameter for ?name , we get back the triples that describe the occupation with that name (Basic Triple Pattern $this ?p1 ?o1 ).","title":"Integrate the Iterator into the Generator"},{"location":"tutorial/building-a-restful-api/#extend-the-generator-with-nested-information","text":"In the previous section , we only returned the triples that directly describe the bindings for $this . However, some relevant information may appear in triples that are further removed from $this . For example, SKOS-XL labels use an extra level of nesting, where the actual label content appears two hops away from $this . If we want to include such nested information into our Generator, we must specify this with additional Basic Triple Patterns. Since only some properties use nesting, we must typically enclose deeper hops inside an Optional clause, together with either a whitelist of properties we want to follow, or a blacklist of properties we do not want to follow.
In the following example, we use a whitelist to include properties whose textual content is found one nesting level deeper: prefix cnluwv: prefix skosxl: construct { ?this ?p1 ?o1. ?o1 ?p2 ?o2. } where { { select $this { $this a cnluwv:Occupation; skosxl:prefLabel/skosxl:literalForm $name. } } $this ?p1 ?o1. optional { ?o1 ?p2 ?o2. filter(?p1 in (cnluwv:hasContentStatus, cnluwv:hasDetailedDescription, skosxl:altLabel, skosxl:prefLabel)) } } Notice that the whitelist is implemented with operator in ; blacklists are similarly implemented with operator not in .","title":"Extend the Generator with nested information"},{"location":"tutorial/building-a-restful-api/#full-example_1","text":"The following query is an example of a working Generator query: prefix cnluwv: prefix skosxl: construct { ?this ?p1 ?o1. ?o1 ?p2 ?o2. } where { { select $this { $this a cnluwv:Occupation; skosxl:prefLabel/skosxl:literalForm $name. } } $this ?p1 ?o1. optional { ?o1 ?p2 ?o2. filter(?p1 in (cnluwv:hasContentStatus, cnluwv:hasDetailedDescription, skosxl:altLabel, skosxl:prefLabel)) } }","title":"Full example"},{"location":"tutorial/building-a-restful-api/#returning-json","text":"From a content perspective, our Generator query functions like a REST API path: we can set zero or more query parameters, and we receive the information that conforms to our configuration. By default, the Generator returns various RDF serializations, such as JSON-LD. While the JSON body contains all the relevant information, the syntactic structure of the body looks quite messy.","title":"Returning JSON"},{"location":"tutorial/building-a-restful-api/#adding-a-json-ld-frame","text":"In order to better structure the syntax of the returned JSON body, we make use of a JSON-LD Frame. We start out with the empty frame, and will build this out in subsequent steps: {} You can try this out by going to the following query: link In fact, it is best to keep this query open in a separate window or tab, and apply each of the following steps yourself, to see the live effects of changing the JSON-LD Frame configuration.","title":"Adding a JSON-LD Frame"},{"location":"tutorial/building-a-restful-api/#configure-the-type","text":"We want the JSON objects to describe information of a specific type. In our example, each object should describe an occupation. We can configure this in the JSON-LD Frame by using the \"@type\" key, together with the IRI of the occupation class: { \"@type\": \"https://linkeddata.uwv.nl/ns/competentnl_uwv#Occupation\" } We now see that the JSON data starts to form around the occupation node.","title":"Configure the type"},{"location":"tutorial/building-a-restful-api/#configure-the-context","text":"The JSON object that describes an occupation contains a lot of confusing syntax and lengthy IRIs. The JSON-LD standard allows us to clean this up through a piece of configuration called the Context. The context is typically the same for all objects that are returned by an API. The context is itself a JSON object that is specified under the \"@context\" key. We start by including the following sub-keys: \"@base\" configures the IRI namespace of the instances. \"@version\" indicates which JSON-LD version we use. \"@vocab\" configures the IRI namespace of the main vocabulary that is used. We can now abbreviate the configuration for \"@type\" to \"Occupation\" .
This will make use of the occupation class within the IRI namespace that is specified under \"@vocab\" : { \"@context\": { \"@base\": \"https://linkeddata.uwv.nl/id/\", \"@version\": 1.1, \"@vocab\": \"https://linkeddata.uwv.nl/ns/competentnl_uwv#\" }, \"@type\": \"Occupation\" }","title":"Configure the context"},{"location":"tutorial/building-a-restful-api/#add-iri-prefix-declarations","text":"We can add IRI prefix declarations to the Context. This results in shorter keys and values in the JSON objects: { \"@context\": { \"cnl\": \"https://linkeddata.uwv.nl/ns/competentnl_uwv#\", \"dct\": \"http://purl.org/dc/terms/\", \"rdfs\": \"http://www.w3.org/2000/01/rdf-schema#\", \"skos\": \"http://www.w3.org/2004/02/skos/core#\", \"skosxl\": \"http://www.w3.org/2008/05/skos-xl#\", \"xsd\": \"http://www.w3.org/2001/XMLSchema#\", ... } }","title":"Add IRI prefix declarations"},{"location":"tutorial/building-a-restful-api/#configure-key-names","text":"While adding IRI prefix declarations makes many keys and values shorter, we can go one step further and use completely different key names that map onto IRIs. This allows us to add keys in a different language (e.g. in Dutch), or it allows us to get rid of the IRI alias that was still included after adding IRI prefix declarations. Furthermore, we can introduce our own names for the somewhat awkward-looking keys \"@id\" and \"@type\" . The following Context results in keys that consist of simple names, devoid of any (abbreviated or full) IRIs, and devoid of strange @-signs (except for the Context key, which cannot be renamed): { \"@context\": { \"@base\": \"https://linkeddata.uwv.nl/id/\", \"@version\": 1.1, \"@vocab\": \"https://linkeddata.uwv.nl/ns/competentnl_uwv#\", \"altLabel\": \"skosxl:altLabel\", \"cnl\": \"https://linkeddata.uwv.nl/ns/competentnl_uwv#\", \"broadMatch\": \"skos:broadMatch\", \"created\": \"dct:created\", \"dct\": \"http://purl.org/dc/terms/\", \"id\": \"@id\", \"inScheme\": \"skos:inScheme\", \"rdfs\": \"http://www.w3.org/2000/01/rdf-schema#\", \"relatedMatch\": \"skos:relatedMatch\", \"skos\": \"http://www.w3.org/2004/02/skos/core#\", \"skosxl\": \"http://www.w3.org/2008/05/skos-xl#\", \"type\": \"@type\", \"xsd\": \"http://www.w3.org/2001/XMLSchema#\" }, \"type\": \"Occupation\" }","title":"Configure key names"},{"location":"tutorial/building-a-restful-api/#configure-datatypes","text":"We still have too much syntactic clutter for values with a datatype. For example, this is how a created date is shown: { \"@context\": { ... }, ..., \"created\": { \"type\": \"xsd:dateTime\", \"@value\": \"2024-12-09T00:00:00\" }, ... } Again, we can use the Context to hide unnecessary details from the JSON object. The following entry specifies that the datatype of 'created' values is XML Schema Datatypes (XSD) date/time: { \"@context\": { ..., \"created\": { \"@id\": \"dct:created\", \"@type\": \"xsd:dateTime\" }, ... } }","title":"Configure datatypes"},{"location":"tutorial/building-a-restful-api/#configure-languages","text":"We still have too much syntactic clutter for values with a language tag. For example, this is how a literal form is shown: { \"@context\": { ... }, ..., \"literalForm\": { \"@language\": \"nl\", \"@value\": \"medewerker archief\" }, ... } Again, we can use the Context to hide unnecessary details from the JSON object. The following entry specifies that the language of 'literalForm' values is Dutch ('nl'): { \"@context\": { ..., \"literalForm\": { \"@id\": \"skosxl:literalForm\", \"@language\": \"nl\" }, ...
} }","title":"Configure languages"},{"location":"tutorial/building-a-restful-api/#scoped-contexts","text":"{ \"@context\": { ..., \"altLabel\": { \"@id\": \"skosxl:altLabel\", \"@context\": { \"literalForm\": { \"@id\": \"skosxl:literalForm\", \"@language\": \"nl\" } } }, ... }, ... }","title":"Scoped contexts"},{"location":"tutorial/building-a-restful-api/#using-the-restful-api","text":"Once a couple of SPARQL queries have been specified, it is possible to use the REST API through an OpenAPI Specification. This is done by the following these steps: Create an API Token in the Triply GUI. Go to an HTTPS program, and configure the API Token as an HTTPS Bearer Token. Specify the standard Accept header for YAML, the format used by the OpenAPI Specification: text/vnd.yaml Perform an HTTPS request against URL https://${host}/queries/$[account}/ , where you enter the host name of your Triply environment and the name of the account under which the queries are stored. This downloads the OpenAPI Specification that contains metadata about all queries under the specified account. If you want to retrieve the metadata for one specific query version, change the URL in item 4 to https://${host}/queries/$[account}/${query}/${version} Load the OpenAPI Specification YAML file into an HTTPS program. With the same API Token configured, you can now easily submit queries to the various REST paths.","title":"Using the RESTful API"},{"location":"yasgui/","text":"On this page: Yasgui SPARQL Editor Supported key combinations Templates SPARQL-concat Handlebars Rendering HTML Visualizations Table Features Table Example Response Gallery (TriplyDB Plugin) Variables Format Styling Gallery Example Chart (TriplyDB Plugin) Geo (TriplyDB Plugin) Variables Color values WMS tile-servers Geo-3D (TriplyDB-only) Variables Geo Events (TriplyDB Plugin) Pivot Table (TriplyDB Plugin) Timeline (TriplyDB Plugin) Network (TriplyDB Plugin) Markup (TriplyDB Plugin) Yasgui \u00b6 This section explains the use of SPARQL via Yasgui. Yasgui provides various advanced features for creating, sharing, and visualizing SPARQL queries and their results. SPARQL Editor \u00b6 The Yasgui SPARQL editor is a query editor that offers syntax highlighting, syntax validation, autocompletion, a variety of different SPARQL result visualizations, with a plugin architecture that enables customization . By default, the query editor provides autocomplete suggestions via the LOV API. Website maintainers can add their own autocompletion logic as well. For example, the Yasgui integration in TriplyDB uses the TriplyDB API to more accurately provide suggestions based on the underlying data. Sharing queries now involves less than having to copy/past complete SPARQL queries. Instead, you can share your query (and the corresponding visualization settings) using a simple URL. Supported key combinations \u00b6 The following table enumerates the key combinations that are supported by the SPARQL Editor. Key combination Behavior Alt + Left Move the cursor to the beginning of the current line. Alt + Right Move the cursor to the end of the current line. Alt + U Redo the last change within the current selection. Ctrl + Backspace Delete to the beginning of the group before the cursor. Ctrl + Delete Delete to the beginning of the group after the cursor. Ctrl + End Move the cursor to the end of the query. Ctrl + Home Move the cursor to the start of the query. Ctrl + Left Move the cursor to the left of the group before the cursor. 
Ctrl + Right Move the cursor to the right of the group after the cursor. Ctrl + [ Decrements the indentation for the current line or the lines involved in the current selection. Ctrl + ] Increments the indentation for the current line or the lines involved in the current selection. Ctrl + / Toggles on/off the commenting of the current line or the lines involved in the current selection. Ctrl + A Select the whole query. Ctrl + D Deletes the current line or all lines involved in the current selection. Ctrl + U Undo the last change within the current selection. Ctrl + Y Redo the last undone edit action. Ctrl + Z Undo the last edit action. Ctrl + Shift + F Auto-formats the whole query or the lines involved in the current selection. Shift + Tab Auto-indents the current line or the lines involved in the current selection. Tab Indents the current line or the lines involved in the current selection. Templates \u00b6 SPARQL has standardized capabilities for constructing complex strings and literals. This allows human-readable labels and HTML widgets to be generated from within SPARQL. Unfortunately, the syntax for constructing such labels and widgets is a bit cumbersome. SPARQL-concat \u00b6 For example, the following SPARQL query returns HTML widgets that can be displayed in a web browser (see SPARQL Gallery ). It uses the concat function which allows an arbitrary number of string arguments to be concatenated into one string. Notice that this requires extensive quoting for each argument (e.g., ' ' ), as well as conversions from literals to strings (e.g., str(?typeName) ). Finally, in order to return an HTML literal we need to first bind the concatenated string to some variable ?lex , and then apply the strdt function in order to construct a literal with datatype IRI rdf:HTML . You can try this query online . prefix def: prefix foaf: prefix rdf: prefix rdfs: select * { ?pokemon def:cry ?cry; def:type/rdfs:label ?typeName; foaf:depiction ?image; rdfs:label ?name. bind(concat(' ',str(?typeName),' \u300b ',str(?name),' ', '', '') as ?lex) bind(strdt(?lex,rdf:HTML) as ?widget) } limit 25 Handlebars \u00b6 The SPARQL Editor in TriplyDB supports SPARQL Templates, which makes it easier to write human-readable labels and HTML widgets. SPARQL Templates are strings in which occurrences of {{x}} will be replaced with the to-string converted results of bindings to SPARQL variable ?x . The following example query produces the same result set as the above one, but allows the entire HTML string to be written at once as a SPARQL Template. Notice that this removes the need for concatenating ( concat/n ), explicit to-string conversion ( str/1 ), and also allows the HTML literal to be constructed more easily (no strdt/2 needed). You can try this query online . prefix def: prefix foaf: prefix rdf: prefix rdfs: select * { ?pokemon def:cry ?cry; def:type/rdfs:label ?typeName; foaf:depiction ?image; rdfs:label ?name. bind(''' {{typeName}} \u300b {{name}} '''^^rdf:HTML as ?widget) } limit 25 SPARQL Templates can be combined with the SPARQL Gallery feature in order to generate galleries of HTML widgets. Rendering HTML \u00b6 To distinguish between text and HTML result values, the visualization library checks for the rdf:HTML datatype. The following query will return its result as plain text: select * { bind(' Test ' as ?widget) } This query will render the result as HTML: PREFIX rdf: select * { bind(' Test '^^rdf:HTML as ?widget) } In order to guarantee safety, TriplyDB sanitizes HTML literals before rendering them. This means that tags like ,