From 53bd0cd43fd993cc149f048653a7b9aae5795d40 Mon Sep 17 00:00:00 2001 From: "copilot-swe-agent[bot]" <198982749+Copilot@users.noreply.github.com> Date: Wed, 12 Nov 2025 12:52:34 +0000 Subject: [PATCH 1/4] Initial plan From 4a8ce553b84c731344d7e4856c9b9c24e49ade29 Mon Sep 17 00:00:00 2001 From: "copilot-swe-agent[bot]" <198982749+Copilot@users.noreply.github.com> Date: Wed, 12 Nov 2025 12:57:26 +0000 Subject: [PATCH 2/4] Fix typos in code files (TypeScript, CSS) Co-authored-by: ayush-shah <40225091+ayush-shah@users.noreply.github.com> --- .../CodeWithLanguageSelector/CodeWithLanguageSelector.tsx | 2 +- components/ErrorBoundary.tsx | 2 +- pages/[version]/index.tsx | 2 +- public/globals.css | 2 +- ...ugageSelectorUtils.tsx => CodeWithLanguageSelectorUtils.tsx} | 0 5 files changed, 4 insertions(+), 4 deletions(-) rename utils/{CodeWithLanugageSelectorUtils.tsx => CodeWithLanguageSelectorUtils.tsx} (100%) diff --git a/components/CodeWithLanguageSelector/CodeWithLanguageSelector.tsx b/components/CodeWithLanguageSelector/CodeWithLanguageSelector.tsx index a0f1f0c83..24993f154 100644 --- a/components/CodeWithLanguageSelector/CodeWithLanguageSelector.tsx +++ b/components/CodeWithLanguageSelector/CodeWithLanguageSelector.tsx @@ -4,7 +4,7 @@ import { useCodeWithLanguageSelectorContext } from "../../context/CodeWithLangua import { getLanguageIcon, getLanguageName, -} from "../../utils/CodeWithLanugageSelectorUtils"; +} from "../../utils/CodeWithLanguageSelectorUtils"; import codeStyles from "../common/Code/Code.module.css"; import styles from "./CodeWithLanguageSelector.module.css"; diff --git a/components/ErrorBoundary.tsx b/components/ErrorBoundary.tsx index 2c47985f3..837312db0 100644 --- a/components/ErrorBoundary.tsx +++ b/components/ErrorBoundary.tsx @@ -23,7 +23,7 @@ class ErrorBoundary extends Component { public render() { if (this.state.hasError) { - return

Sorry!! Some Error occured; + return Sorry!! Some Error occurred
; } return this.props.children; diff --git a/pages/[version]/index.tsx b/pages/[version]/index.tsx index d9337885e..094f6f863 100644 --- a/pages/[version]/index.tsx +++ b/pages/[version]/index.tsx @@ -149,7 +149,7 @@ export async function getServerSideProps(context: GetServerSidePropsContext) { // If the version value is not of accepted version format, redirect to the latest version page // Example use case, in case of path `/releases` (without version) the control will come here // with version value as `releases`. In this case redirect user to latest version page of - // passed URL, in this case tha will be `/latest/releases` + // passed URL, in this case that will be `/latest/releases` return { redirect: { permanent: true, diff --git a/public/globals.css b/public/globals.css index 36b3d2db7..61dba16d5 100644 --- a/public/globals.css +++ b/public/globals.css @@ -393,7 +393,7 @@ code::-webkit-scrollbar { visibility: hidden; text-decoration: none; pointer-events: none; - /* Using the bellow approach to add a scrolling margin, + /* Using the below approach to add a scrolling margin, Taking this approach as scroll-margin-top is ineffective due to a possible CSS bug for custom web components mentioned here https://developer.mozilla.org/en-US/docs/Web/CSS/:target#examples*/ margin-top: -300px !important; diff --git a/utils/CodeWithLanugageSelectorUtils.tsx b/utils/CodeWithLanguageSelectorUtils.tsx similarity index 100% rename from utils/CodeWithLanugageSelectorUtils.tsx rename to utils/CodeWithLanguageSelectorUtils.tsx From 26215cabeba325286b2aeb1adb1d61728a658049 Mon Sep 17 00:00:00 2001 From: "copilot-swe-agent[bot]" <198982749+Copilot@users.noreply.github.com> Date: Wed, 12 Nov 2025 12:58:43 +0000 Subject: [PATCH 3/4] Fix typos in markdown documentation files Co-authored-by: ayush-shah <40225091+ayush-shah@users.noreply.github.com> --- .../connectors/dashboard/configure-ingestion.md | 2 +- .../connectors/yaml/dashboard/source-config-def.md | 2 +- .../deployment/upgrade/upgrade-prerequisites.md | 2 +- content/partials/v1.10/releases/1.5.7.md | 2 +- content/partials/v1.10/releases/1.6.2.md | 2 +- content/partials/v1.10/releases/1.9.0.md | 2 +- .../connectors/dashboard/configure-ingestion.md | 2 +- .../connectors/yaml/dashboard/source-config-def.md | 2 +- .../deployment/upgrade/upgrade-prerequisites.md | 2 +- content/partials/v1.11/releases/1.5.7.md | 2 +- content/partials/v1.11/releases/1.6.2.md | 2 +- content/partials/v1.11/releases/1.9.0.md | 2 +- .../v1.9/connectors/dashboard/configure-ingestion.md | 2 +- .../connectors/yaml/dashboard/source-config-def.md | 2 +- .../v1.9/deployment/upgrade/upgrade-prerequisites.md | 2 +- content/partials/v1.9/releases/1.5.7.md | 2 +- content/partials/v1.9/releases/1.6.2.md | 2 +- content/partials/v1.9/releases/1.9.0.md | 2 +- .../v1.10.x/deployment/azure-passwordless-auth.md | 2 +- content/v1.10.x/deployment/kubernetes/aks.md | 2 +- content/v1.10.x/deployment/minimum-requirements.md | 2 +- .../azure-key-vault/index.md | 6 +++--- .../deployment/security/jwt-troubleshooting.md | 2 +- .../developing-a-new-connector/apply-ui-changes.md | 2 +- .../develop-ingestion-code.md | 2 +- .../contribute/developing-a-new-connector/test-it.md | 4 ++-- .../how-to-guides/data-collaboration/tasks.md | 2 +- .../data-governance/automation/index.md | 2 +- .../triggers/periodic-batch-entity-trigger.md | 2 +- .../how-to-guides/data-insights/custom-dashboard.md | 2 +- .../observability/alerts.md | 4 ++-- .../profiler/profiler-workflow.md | 2 +- 
.../data-quality-observability/quality/tests-yaml.md | 12 ++++++------ .../connectors/dashboard/domo-dashboard/yaml.md | 2 +- .../connectors/database/bigquery/index.md | 2 +- .../connectors/database/cassandra/index.md | 2 +- .../connectors/database/cassandra/yaml.md | 2 +- .../connectors/database/domo-database/yaml.md | 2 +- .../connectors/database/mongodb/index.md | 2 +- .../connectors/database/mongodb/yaml.md | 2 +- .../connectors/database/redshift/index.md | 2 +- .../connectors/database/redshift/troubleshooting.md | 2 +- .../connectors/database/redshift/yaml.md | 2 +- .../connectors/pipeline/domo-pipeline/yaml.md | 2 +- .../connectors/troubleshoot/index.md | 10 +++++----- .../deployment/azure-passwordless-auth.md | 2 +- .../v1.11.x-SNAPSHOT/deployment/kubernetes/aks.md | 2 +- .../deployment/minimum-requirements.md | 2 +- .../azure-key-vault/index.md | 6 +++--- .../deployment/security/jwt-troubleshooting.md | 2 +- .../developing-a-new-connector/apply-ui-changes.md | 2 +- .../develop-ingestion-code.md | 2 +- .../contribute/developing-a-new-connector/test-it.md | 4 ++-- .../how-to-guides/data-collaboration/tasks.md | 2 +- .../data-governance/automation/index.md | 2 +- .../triggers/periodic-batch-entity-trigger.md | 2 +- .../how-to-guides/data-insights/custom-dashboard.md | 2 +- .../observability/alerts.md | 4 ++-- .../profiler/profiler-workflow.md | 2 +- .../data-quality-observability/quality/tests-yaml.md | 12 ++++++------ .../schemas/email/templateValidationReponse.md | 2 +- .../schemas/entity/feed/customProperty.md | 2 +- .../connections/dashboard/qlikSenseConnection.md | 2 +- .../databaseServiceAutoClassificationPipeline.md | 2 +- .../databaseServiceProfilerPipeline.md | 2 +- content/v1.11.x-SNAPSHOT/sdk/go/index.md | 2 +- .../connectors/dashboard/domo-dashboard/yaml.md | 2 +- content/v1.9.x/connectors/database/bigquery/index.md | 2 +- .../v1.9.x/connectors/database/cassandra/index.md | 2 +- content/v1.9.x/connectors/database/cassandra/yaml.md | 2 +- .../v1.9.x/connectors/database/domo-database/yaml.md | 2 +- content/v1.9.x/connectors/database/mongodb/index.md | 2 +- content/v1.9.x/connectors/database/mongodb/yaml.md | 2 +- content/v1.9.x/connectors/database/redshift/index.md | 2 +- .../connectors/database/redshift/troubleshooting.md | 2 +- content/v1.9.x/connectors/database/redshift/yaml.md | 2 +- .../v1.9.x/connectors/pipeline/domo-pipeline/yaml.md | 2 +- content/v1.9.x/connectors/troubleshoot/index.md | 10 +++++----- content/v1.9.x/deployment/azure-passwordless-auth.md | 2 +- content/v1.9.x/deployment/kubernetes/aks.md | 2 +- content/v1.9.x/deployment/minimum-requirements.md | 2 +- .../azure-key-vault/index.md | 6 +++--- .../deployment/security/jwt-troubleshooting.md | 2 +- .../developing-a-new-connector/apply-ui-changes.md | 2 +- .../develop-ingestion-code.md | 2 +- .../contribute/developing-a-new-connector/test-it.md | 4 ++-- .../v1.9.x/how-to-guides/data-collaboration/tasks.md | 2 +- .../data-governance/automation/index.md | 2 +- .../triggers/periodic-batch-entity-trigger.md | 2 +- .../how-to-guides/data-insights/custom-dashboard.md | 2 +- .../observability/alerts.md | 4 ++-- .../profiler/profiler-workflow.md | 2 +- .../data-quality-observability/quality/tests-yaml.md | 12 ++++++------ .../schemas/email/templateValidationReponse.md | 2 +- .../schemas/entity/feed/customProperty.md | 2 +- .../connections/dashboard/qlikSenseConnection.md | 2 +- .../databaseServiceAutoClassificationPipeline.md | 2 +- .../databaseServiceProfilerPipeline.md | 2 +- content/v1.9.x/sdk/go/index.md | 2 +- 
99 files changed, 134 insertions(+), 134 deletions(-) diff --git a/content/partials/v1.10/connectors/dashboard/configure-ingestion.md b/content/partials/v1.10/connectors/dashboard/configure-ingestion.md index 17c59d579..a9edbebda 100644 --- a/content/partials/v1.10/connectors/dashboard/configure-ingestion.md +++ b/content/partials/v1.10/connectors/dashboard/configure-ingestion.md @@ -48,6 +48,6 @@ Make sure the regex filter pattern accounts for this fully-qualified format. - **Include Tags (toggle)**: Set the 'Include Tags' toggle to control whether to include tags in metadata ingestion. - **Include Data Models (toggle)**: Set the 'Include Data Models' toggle to control whether to include tags as part of metadata ingestion. - **Mark Deleted Dashboards (toggle)**: Set the 'Mark Deleted Dashboards' toggle to flag dashboards as soft-deleted if they are not present anymore in the source system. -- **Include Draft Dashboard (toogle)**: Set the 'Include Draft Dashboard' toggle to include draft dashboards. By default it will include draft dashboards. +- **Include Draft Dashboard (toggle)**: Set the 'Include Draft Dashboard' toggle to include draft dashboards. By default it will include draft dashboards. {% /extraContent %} \ No newline at end of file diff --git a/content/partials/v1.10/connectors/yaml/dashboard/source-config-def.md b/content/partials/v1.10/connectors/yaml/dashboard/source-config-def.md index 7762d91f1..cf612ab4f 100644 --- a/content/partials/v1.10/connectors/yaml/dashboard/source-config-def.md +++ b/content/partials/v1.10/connectors/yaml/dashboard/source-config-def.md @@ -18,7 +18,7 @@ The `sourceConfig` is defined [here](https://github.com/open-metadata/OpenMetada - **markDeletedDashboards**: Set the 'Mark Deleted Dashboards' toggle to flag dashboards as soft-deleted if they are not present anymore in the source system. -- **Include Draft Dashboard (toogle)**: Set the 'Include Draft Dashboard' toggle to include draft dashboards. By default it will include draft dashboards. +- **Include Draft Dashboard (toggle)**: Set the 'Include Draft Dashboard' toggle to include draft dashboards. By default it will include draft dashboards. - **dataModelFilterPattern**: Regex exclude or include data models that matches the pattern. diff --git a/content/partials/v1.10/deployment/upgrade/upgrade-prerequisites.md b/content/partials/v1.10/deployment/upgrade/upgrade-prerequisites.md index fc20e2b1c..03b7018f8 100644 --- a/content/partials/v1.10/deployment/upgrade/upgrade-prerequisites.md +++ b/content/partials/v1.10/deployment/upgrade/upgrade-prerequisites.md @@ -1,6 +1,6 @@ # Prerequisites -Everytime that you plan on upgrading OpenMetadata to a newer version, make sure to go over all these steps: +Every time that you plan on upgrading OpenMetadata to a newer version, make sure to go over all these steps: ## Backup your Metadata diff --git a/content/partials/v1.10/releases/1.5.7.md b/content/partials/v1.10/releases/1.5.7.md index bf015a7d7..61d7fcc99 100644 --- a/content/partials/v1.10/releases/1.5.7.md +++ b/content/partials/v1.10/releases/1.5.7.md @@ -43,7 +43,7 @@ You can find the GitHub release [here](https://github.com/open-metadata/OpenMeta - Fix: Fix exception in search due to exception in database.displayName and databaseSchema.aggregation. 
- MINOR: Knowledge Center publicationDate mismatch error (Collate) - MINOR: Add owner label for knowledge center right panel (Collate) -- Fix: Automator pagination & improvments (Collate) +- Fix: Automator pagination & improvements (Collate) - Fix: ArchiveLog to FALSE for test connection (Collate) - Fix: Knowledge Page deletion is not deleting from the search index (Collate)`, diff --git a/content/partials/v1.10/releases/1.6.2.md b/content/partials/v1.10/releases/1.6.2.md index 23e73a0b7..a6f458d00 100644 --- a/content/partials/v1.10/releases/1.6.2.md +++ b/content/partials/v1.10/releases/1.6.2.md @@ -33,7 +33,7 @@ You can find the GitHub release [here](https://github.com/open-metadata/OpenMeta - Fix: Unable to remove existing values from custom property (enum data type). - Fix: Custom DI description getting added with HTML p tag. (Collate) - Fix: Knowledge Page hierarchy state doesn't persist on refresh. (Collate) -- Fix: Reindex Page Entitiy is Missing on Collate. (Collate) +- Fix: Reindex Page Entity is Missing on Collate. (Collate) - Fix: Avoid pluralizing for custom charts. (Collate) - Improvement: Ability to sort the DI charts based on date or term. - Improvement: Support test connection api cancellation on click of cancel. diff --git a/content/partials/v1.10/releases/1.9.0.md b/content/partials/v1.10/releases/1.9.0.md index 5c73e933b..04d15b20c 100644 --- a/content/partials/v1.10/releases/1.9.0.md +++ b/content/partials/v1.10/releases/1.9.0.md @@ -300,7 +300,7 @@ If you want to allow your assets to belong to multiple domains, you need to go t - Improve profile workflow config to allow engine configuration - Improve template yml error handling and add ability to trigger it from specific branch - Add support of adding tags in test case form -- Improve react, react-dom, react-router and dependant versions +- Improve react, react-dom, react-router and dependent versions - Improve undici from 5.28.5 to 5.29.0 - Add Postgres SP and UDF descriptions - Add Databricks pipeline lineage diff --git a/content/partials/v1.11/connectors/dashboard/configure-ingestion.md b/content/partials/v1.11/connectors/dashboard/configure-ingestion.md index cd94bff06..edf6682eb 100644 --- a/content/partials/v1.11/connectors/dashboard/configure-ingestion.md +++ b/content/partials/v1.11/connectors/dashboard/configure-ingestion.md @@ -48,6 +48,6 @@ Make sure the regex filter pattern accounts for this fully-qualified format. - **Include Tags (toggle)**: Set the 'Include Tags' toggle to control whether to include tags in metadata ingestion. - **Include Data Models (toggle)**: Set the 'Include Data Models' toggle to control whether to include tags as part of metadata ingestion. - **Mark Deleted Dashboards (toggle)**: Set the 'Mark Deleted Dashboards' toggle to flag dashboards as soft-deleted if they are not present anymore in the source system. -- **Include Draft Dashboard (toogle)**: Set the 'Include Draft Dashboard' toggle to include draft dashboards. By default it will include draft dashboards. +- **Include Draft Dashboard (toggle)**: Set the 'Include Draft Dashboard' toggle to include draft dashboards. By default it will include draft dashboards. 
{% /extraContent %} \ No newline at end of file diff --git a/content/partials/v1.11/connectors/yaml/dashboard/source-config-def.md b/content/partials/v1.11/connectors/yaml/dashboard/source-config-def.md index 7762d91f1..cf612ab4f 100644 --- a/content/partials/v1.11/connectors/yaml/dashboard/source-config-def.md +++ b/content/partials/v1.11/connectors/yaml/dashboard/source-config-def.md @@ -18,7 +18,7 @@ The `sourceConfig` is defined [here](https://github.com/open-metadata/OpenMetada - **markDeletedDashboards**: Set the 'Mark Deleted Dashboards' toggle to flag dashboards as soft-deleted if they are not present anymore in the source system. -- **Include Draft Dashboard (toogle)**: Set the 'Include Draft Dashboard' toggle to include draft dashboards. By default it will include draft dashboards. +- **Include Draft Dashboard (toggle)**: Set the 'Include Draft Dashboard' toggle to include draft dashboards. By default it will include draft dashboards. - **dataModelFilterPattern**: Regex exclude or include data models that matches the pattern. diff --git a/content/partials/v1.11/deployment/upgrade/upgrade-prerequisites.md b/content/partials/v1.11/deployment/upgrade/upgrade-prerequisites.md index 0633e615b..8be0d58ae 100644 --- a/content/partials/v1.11/deployment/upgrade/upgrade-prerequisites.md +++ b/content/partials/v1.11/deployment/upgrade/upgrade-prerequisites.md @@ -1,6 +1,6 @@ # Prerequisites -Everytime that you plan on upgrading OpenMetadata to a newer version, make sure to go over all these steps: +Every time that you plan on upgrading OpenMetadata to a newer version, make sure to go over all these steps: ## Backup your Metadata diff --git a/content/partials/v1.11/releases/1.5.7.md b/content/partials/v1.11/releases/1.5.7.md index bf015a7d7..61d7fcc99 100644 --- a/content/partials/v1.11/releases/1.5.7.md +++ b/content/partials/v1.11/releases/1.5.7.md @@ -43,7 +43,7 @@ You can find the GitHub release [here](https://github.com/open-metadata/OpenMeta - Fix: Fix exception in search due to exception in database.displayName and databaseSchema.aggregation. - MINOR: Knowledge Center publicationDate mismatch error (Collate) - MINOR: Add owner label for knowledge center right panel (Collate) -- Fix: Automator pagination & improvments (Collate) +- Fix: Automator pagination & improvements (Collate) - Fix: ArchiveLog to FALSE for test connection (Collate) - Fix: Knowledge Page deletion is not deleting from the search index (Collate)`, diff --git a/content/partials/v1.11/releases/1.6.2.md b/content/partials/v1.11/releases/1.6.2.md index 23e73a0b7..a6f458d00 100644 --- a/content/partials/v1.11/releases/1.6.2.md +++ b/content/partials/v1.11/releases/1.6.2.md @@ -33,7 +33,7 @@ You can find the GitHub release [here](https://github.com/open-metadata/OpenMeta - Fix: Unable to remove existing values from custom property (enum data type). - Fix: Custom DI description getting added with HTML p tag. (Collate) - Fix: Knowledge Page hierarchy state doesn't persist on refresh. (Collate) -- Fix: Reindex Page Entitiy is Missing on Collate. (Collate) +- Fix: Reindex Page Entity is Missing on Collate. (Collate) - Fix: Avoid pluralizing for custom charts. (Collate) - Improvement: Ability to sort the DI charts based on date or term. - Improvement: Support test connection api cancellation on click of cancel. 
diff --git a/content/partials/v1.11/releases/1.9.0.md b/content/partials/v1.11/releases/1.9.0.md index 5c73e933b..04d15b20c 100644 --- a/content/partials/v1.11/releases/1.9.0.md +++ b/content/partials/v1.11/releases/1.9.0.md @@ -300,7 +300,7 @@ If you want to allow your assets to belong to multiple domains, you need to go t - Improve profile workflow config to allow engine configuration - Improve template yml error handling and add ability to trigger it from specific branch - Add support of adding tags in test case form -- Improve react, react-dom, react-router and dependant versions +- Improve react, react-dom, react-router and dependent versions - Improve undici from 5.28.5 to 5.29.0 - Add Postgres SP and UDF descriptions - Add Databricks pipeline lineage diff --git a/content/partials/v1.9/connectors/dashboard/configure-ingestion.md b/content/partials/v1.9/connectors/dashboard/configure-ingestion.md index 935522325..1b28e20b6 100644 --- a/content/partials/v1.9/connectors/dashboard/configure-ingestion.md +++ b/content/partials/v1.9/connectors/dashboard/configure-ingestion.md @@ -48,6 +48,6 @@ Make sure the regex filter pattern accounts for this fully-qualified format. - **Include Tags (toggle)**: Set the 'Include Tags' toggle to control whether to include tags in metadata ingestion. - **Include Data Models (toggle)**: Set the 'Include Data Models' toggle to control whether to include tags as part of metadata ingestion. - **Mark Deleted Dashboards (toggle)**: Set the 'Mark Deleted Dashboards' toggle to flag dashboards as soft-deleted if they are not present anymore in the source system. -- **Include Draft Dashboard (toogle)**: Set the 'Include Draft Dashboard' toggle to include draft dashboards. By default it will include draft dashboards. +- **Include Draft Dashboard (toggle)**: Set the 'Include Draft Dashboard' toggle to include draft dashboards. By default it will include draft dashboards. {% /extraContent %} \ No newline at end of file diff --git a/content/partials/v1.9/connectors/yaml/dashboard/source-config-def.md b/content/partials/v1.9/connectors/yaml/dashboard/source-config-def.md index 7762d91f1..cf612ab4f 100644 --- a/content/partials/v1.9/connectors/yaml/dashboard/source-config-def.md +++ b/content/partials/v1.9/connectors/yaml/dashboard/source-config-def.md @@ -18,7 +18,7 @@ The `sourceConfig` is defined [here](https://github.com/open-metadata/OpenMetada - **markDeletedDashboards**: Set the 'Mark Deleted Dashboards' toggle to flag dashboards as soft-deleted if they are not present anymore in the source system. -- **Include Draft Dashboard (toogle)**: Set the 'Include Draft Dashboard' toggle to include draft dashboards. By default it will include draft dashboards. +- **Include Draft Dashboard (toggle)**: Set the 'Include Draft Dashboard' toggle to include draft dashboards. By default it will include draft dashboards. - **dataModelFilterPattern**: Regex exclude or include data models that matches the pattern. 
diff --git a/content/partials/v1.9/deployment/upgrade/upgrade-prerequisites.md b/content/partials/v1.9/deployment/upgrade/upgrade-prerequisites.md index de2a147f5..516524a94 100644 --- a/content/partials/v1.9/deployment/upgrade/upgrade-prerequisites.md +++ b/content/partials/v1.9/deployment/upgrade/upgrade-prerequisites.md @@ -1,6 +1,6 @@ # Prerequisites -Everytime that you plan on upgrading OpenMetadata to a newer version, make sure to go over all these steps: +Every time that you plan on upgrading OpenMetadata to a newer version, make sure to go over all these steps: ## Backup your Metadata diff --git a/content/partials/v1.9/releases/1.5.7.md b/content/partials/v1.9/releases/1.5.7.md index bf015a7d7..61d7fcc99 100644 --- a/content/partials/v1.9/releases/1.5.7.md +++ b/content/partials/v1.9/releases/1.5.7.md @@ -43,7 +43,7 @@ You can find the GitHub release [here](https://github.com/open-metadata/OpenMeta - Fix: Fix exception in search due to exception in database.displayName and databaseSchema.aggregation. - MINOR: Knowledge Center publicationDate mismatch error (Collate) - MINOR: Add owner label for knowledge center right panel (Collate) -- Fix: Automator pagination & improvments (Collate) +- Fix: Automator pagination & improvements (Collate) - Fix: ArchiveLog to FALSE for test connection (Collate) - Fix: Knowledge Page deletion is not deleting from the search index (Collate)`, diff --git a/content/partials/v1.9/releases/1.6.2.md b/content/partials/v1.9/releases/1.6.2.md index 23e73a0b7..a6f458d00 100644 --- a/content/partials/v1.9/releases/1.6.2.md +++ b/content/partials/v1.9/releases/1.6.2.md @@ -33,7 +33,7 @@ You can find the GitHub release [here](https://github.com/open-metadata/OpenMeta - Fix: Unable to remove existing values from custom property (enum data type). - Fix: Custom DI description getting added with HTML p tag. (Collate) - Fix: Knowledge Page hierarchy state doesn't persist on refresh. (Collate) -- Fix: Reindex Page Entitiy is Missing on Collate. (Collate) +- Fix: Reindex Page Entity is Missing on Collate. (Collate) - Fix: Avoid pluralizing for custom charts. (Collate) - Improvement: Ability to sort the DI charts based on date or term. - Improvement: Support test connection api cancellation on click of cancel. diff --git a/content/partials/v1.9/releases/1.9.0.md b/content/partials/v1.9/releases/1.9.0.md index 5c73e933b..04d15b20c 100644 --- a/content/partials/v1.9/releases/1.9.0.md +++ b/content/partials/v1.9/releases/1.9.0.md @@ -300,7 +300,7 @@ If you want to allow your assets to belong to multiple domains, you need to go t - Improve profile workflow config to allow engine configuration - Improve template yml error handling and add ability to trigger it from specific branch - Add support of adding tags in test case form -- Improve react, react-dom, react-router and dependant versions +- Improve react, react-dom, react-router and dependent versions - Improve undici from 5.28.5 to 5.29.0 - Add Postgres SP and UDF descriptions - Add Databricks pipeline lineage diff --git a/content/v1.10.x/deployment/azure-passwordless-auth.md b/content/v1.10.x/deployment/azure-passwordless-auth.md index a89745f17..eb10d1fc6 100644 --- a/content/v1.10.x/deployment/azure-passwordless-auth.md +++ b/content/v1.10.x/deployment/azure-passwordless-auth.md @@ -7,7 +7,7 @@ collate: false # Azure - Enable Passwordless Database Backend Connection By Default, OpenMetadata supports basic authentication when connecting to MySQL/PostgreSQL as Database backend. 
With Azure, you can enhance the security for configuring Database configurations other the basic authentication mechanism. -This guide will help you setup the application to use passwordless approach for Azure PaaS Databases (preferrably [Azure Database for PostgreSQL - Flexible Server](https://learn.microsoft.com/en-us/azure/postgresql/flexible-server/service-overview) and [Azure Database for MySQL - Flexible Server](https://learn.microsoft.com/en-us/azure/mysql/flexible-server/overview)). +This guide will help you setup the application to use passwordless approach for Azure PaaS Databases (preferably [Azure Database for PostgreSQL - Flexible Server](https://learn.microsoft.com/en-us/azure/postgresql/flexible-server/service-overview) and [Azure Database for MySQL - Flexible Server](https://learn.microsoft.com/en-us/azure/mysql/flexible-server/overview)). # Prerequisites diff --git a/content/v1.10.x/deployment/kubernetes/aks.md b/content/v1.10.x/deployment/kubernetes/aks.md index 707ac651d..5abb73493 100644 --- a/content/v1.10.x/deployment/kubernetes/aks.md +++ b/content/v1.10.x/deployment/kubernetes/aks.md @@ -167,7 +167,7 @@ kubectl apply -f permissions_pod.yaml helm repo add open-metadata https://helm.open-metadata.org/ ``` #### Create secrets -It is recommeded to use external database and search for production deplyoments. The following implementation uses external postgresql DB from Azure Database. Any of the popular databases can be used. The default implementation uses mysql. +It is recommended to use external database and search for production deployments. The following implementation uses external postgresql DB from Azure Database. Any of the popular databases can be used. The default implementation uses mysql. ```azure-cli kubectl create secret generic airflow-secrets \ diff --git a/content/v1.10.x/deployment/minimum-requirements.md b/content/v1.10.x/deployment/minimum-requirements.md index e5641c277..00d02d162 100644 --- a/content/v1.10.x/deployment/minimum-requirements.md +++ b/content/v1.10.x/deployment/minimum-requirements.md @@ -35,7 +35,7 @@ Our minimum specs recommendation for ElasticSearch / OpenSearch deployment is - 2 vCPUs - 8 GiB Memory - 100 GiB Storage (per node) -- Master / Worker Nodes with atleast 1 Master and 2 Worker Nodes +- Master / Worker Nodes with at least 1 Master and 2 Worker Nodes ### Software Requirements diff --git a/content/v1.10.x/deployment/secrets-manager/supported-implementations/azure-key-vault/index.md b/content/v1.10.x/deployment/secrets-manager/supported-implementations/azure-key-vault/index.md index 6df556e0b..45ade8eb7 100644 --- a/content/v1.10.x/deployment/secrets-manager/supported-implementations/azure-key-vault/index.md +++ b/content/v1.10.x/deployment/secrets-manager/supported-implementations/azure-key-vault/index.md @@ -20,10 +20,10 @@ for the non-managed follow only the steps related to the Airflow server and CLI. 2. Inside the App Registration, go to `Certificates & Secrets` and create a `Client secret`. Note down the `Value`, it will be our `clientSecret` configuration. 3. From the App Registration overview page, note down the `Application (client) ID` and the `Directory (tenant) ID`. -#### Managed Identity (recommnded) +#### Managed Identity (recommended) -1. In your Azure subscription create [Manged Identity](https://learn.microsoft.com/en-us/entra/identity/managed-identities-azure-resources/overview) -2. 
Use this created identity - for AKS users this means you need to use [Pod Identity](https://learn.microsoft.com/en-us/azure/aks/use-azure-ad-pod-identity) or [Workload Identity (recommnded)](https://learn.microsoft.com/en-us/azure/aks/workload-identity-overview?tabs=dotnet). +1. In your Azure subscription create [Managed Identity](https://learn.microsoft.com/en-us/entra/identity/managed-identities-azure-resources/overview) +2. Use this created identity - for AKS users this means you need to use [Pod Identity](https://learn.microsoft.com/en-us/azure/aks/use-azure-ad-pod-identity) or [Workload Identity (recommended)](https://learn.microsoft.com/en-us/azure/aks/workload-identity-overview?tabs=dotnet). {% note %} diff --git a/content/v1.10.x/deployment/security/jwt-troubleshooting.md b/content/v1.10.x/deployment/security/jwt-troubleshooting.md index d632484dd..310a49a5b 100644 --- a/content/v1.10.x/deployment/security/jwt-troubleshooting.md +++ b/content/v1.10.x/deployment/security/jwt-troubleshooting.md @@ -6,7 +6,7 @@ collate: false --- # JWT Troubleshooting -Add the `{domain}:{port}/api/v1/sytem/config/jwks` in the list of publicKeys +Add the `{domain}:{port}/api/v1/system/config/jwks` in the list of publicKeys ```yaml authentication: diff --git a/content/v1.10.x/developers/contribute/developing-a-new-connector/apply-ui-changes.md b/content/v1.10.x/developers/contribute/developing-a-new-connector/apply-ui-changes.md index 3d43b7224..27b5d423d 100644 --- a/content/v1.10.x/developers/contribute/developing-a-new-connector/apply-ui-changes.md +++ b/content/v1.10.x/developers/contribute/developing-a-new-connector/apply-ui-changes.md @@ -128,7 +128,7 @@ In this section, we provide guides and references to use the MySQL connector. To extract metadata the user used in the connection needs to have access to the `INFORMATION_SCHEMA`. By default, a user can see only the rows in the `INFORMATION_SCHEMA` that correspond to objects for which the user has the proper access privileges. ~~~SQL --- Create user. If is ommited, defaults to '%' +-- Create user. If is omitted, defaults to '%' -- More details https://dev.mysql.com/doc/refman/8.0/en/create-user.html CREATE USER ''[@''] IDENTIFIED BY ''; diff --git a/content/v1.10.x/developers/contribute/developing-a-new-connector/develop-ingestion-code.md b/content/v1.10.x/developers/contribute/developing-a-new-connector/develop-ingestion-code.md index e2cd41ec0..bce285f6c 100644 --- a/content/v1.10.x/developers/contribute/developing-a-new-connector/develop-ingestion-code.md +++ b/content/v1.10.x/developers/contribute/developing-a-new-connector/develop-ingestion-code.md @@ -88,7 +88,7 @@ From the Service Topology you can understand what methods you need to implement: Can be found in [`ingestion/src/metadata/ingestion/source/database/database_service.py`](https://github.com/open-metadata/OpenMetadata/blob/main/ingestion/src/metadata/ingestion/source/database/database_service.py) {%inlineCallout icon="description" bold="OpenMetadata 1.6.0 or later" href="/deployment"%} -Starting from 1.6.0 the OpenMetadata Ingestion Framewotk is using a ServiceSpec specificaiton +Starting from 1.6.0 the OpenMetadata Ingestion Framewotk is using a ServiceSpec specification in order to define the entrypoints for the ingestion process. 
{%/inlineCallout%} diff --git a/content/v1.10.x/developers/contribute/developing-a-new-connector/test-it.md b/content/v1.10.x/developers/contribute/developing-a-new-connector/test-it.md index a206bd8d7..cc9cbacf8 100644 --- a/content/v1.10.x/developers/contribute/developing-a-new-connector/test-it.md +++ b/content/v1.10.x/developers/contribute/developing-a-new-connector/test-it.md @@ -5,7 +5,7 @@ slug: /developers/contribute/developing-a-new-connector/test-it # Test It -In order to test your new connector you need to run `make generate` from the project's root in order to generate the propert Python Classes from the JSON Schemas you created and modified. +In order to test your new connector you need to run `make generate` from the project's root in order to generate the property Python Classes from the JSON Schemas you created and modified. ## Unit Tests @@ -23,7 +23,7 @@ This could be slow and in order to iterate faster you could just run the tests y In order to test the connector using the CLI you first need to have the OpenMetadata stack running locally. The easiest way to do is to check how to do it [here](/developers/contribute/build-code-and-run-tests). -With it up and running you can install the ingestion pacakge locally and use the CLI directly: +With it up and running you can install the ingestion package locally and use the CLI directly: ```bash metadata ingest -c {your_yaml_file} diff --git a/content/v1.10.x/how-to-guides/data-collaboration/tasks.md b/content/v1.10.x/how-to-guides/data-collaboration/tasks.md index 4cc52541a..59df01be9 100644 --- a/content/v1.10.x/how-to-guides/data-collaboration/tasks.md +++ b/content/v1.10.x/how-to-guides/data-collaboration/tasks.md @@ -7,7 +7,7 @@ slug: /how-to-guides/data-collaboration/tasks # Create Tasks Tasks are an extension to the Conversation Threads feature where users can create tasks to -request to create or update description or tags for a data asset. Tasks are assgined to the owner of the data asset by default. If there are no owners, the task can be assigned to an appropriate user or team. +request to create or update description or tags for a data asset. Tasks are assigned to the owner of the data asset by default. If there are no owners, the task can be assigned to an appropriate user or team. {% image src="/images/v1.10/how-to-guides/collaboration/task.webp" diff --git a/content/v1.10.x/how-to-guides/data-governance/automation/index.md b/content/v1.10.x/how-to-guides/data-governance/automation/index.md index 900fbec63..e841badd4 100644 --- a/content/v1.10.x/how-to-guides/data-governance/automation/index.md +++ b/content/v1.10.x/how-to-guides/data-governance/automation/index.md @@ -264,4 +264,4 @@ Note that this automation, the ML Tagging, will be deprecated in future releases - **Propagate Metadata Thoughtfully**: When propagating metadata via lineage, make sure that the source metadata is correct before applying it across multiple datasets. - **Start with Controlled Propagation**: For complex and large lineage trees, begin the propagation with a limited propagation depth (e.g., 2-3 levels/depth) and gradually increase as needed to avoid unintended widespread changes. - **Understand Path-Aware Depth Behavior**: In complex lineage with multiple parent paths, remember that propagation depth is calculated separately for each path from each root entity. This ensures precise control over which upstream sources contribute metadata to downstream assets. 
-- **Set Up Stop Conditions for Critical Data**: Cofigure strategic stop conditions around critical ownership boundaries or sensitive data boundaries (Tags- PII, Confidential) to prevent accidental metadata overwrites. +- **Set Up Stop Conditions for Critical Data**: Configure strategic stop conditions around critical ownership boundaries or sensitive data boundaries (Tags- PII, Confidential) to prevent accidental metadata overwrites. diff --git a/content/v1.10.x/how-to-guides/data-governance/workflows/elements/triggers/periodic-batch-entity-trigger.md b/content/v1.10.x/how-to-guides/data-governance/workflows/elements/triggers/periodic-batch-entity-trigger.md index 2c6631561..76ab4c27d 100644 --- a/content/v1.10.x/how-to-guides/data-governance/workflows/elements/triggers/periodic-batch-entity-trigger.md +++ b/content/v1.10.x/how-to-guides/data-governance/workflows/elements/triggers/periodic-batch-entity-trigger.md @@ -7,7 +7,7 @@ collate: true # Governance Workflows - Periodic Batch Entity Trigger -The **Periodic Batch Entity Trigger** enables actions to be triggered on a periodic schedule, processing a batch of entitites at a time. +The **Periodic Batch Entity Trigger** enables actions to be triggered on a periodic schedule, processing a batch of entities at a time. This type of trigger is useful for automating regular workflows that need to run on a schedule. ## Configuration diff --git a/content/v1.10.x/how-to-guides/data-insights/custom-dashboard.md b/content/v1.10.x/how-to-guides/data-insights/custom-dashboard.md index 9560dcb12..0eb9387eb 100644 --- a/content/v1.10.x/how-to-guides/data-insights/custom-dashboard.md +++ b/content/v1.10.x/how-to-guides/data-insights/custom-dashboard.md @@ -98,7 +98,7 @@ You can choose the field name from the fields dropdown from the chart by functio Remember, spaces in field names are replaced with a period (.), and the key should be in lower camel case (meaning the first word is lowercase, and each subsequent word starts with an uppercase letter). -**Query**: The query is to filter down the result to plot, use the format `q=': '`. The `q=` part signifies a query, adn the condition goes inside the single quotes. +**Query**: The query is to filter down the result to plot, use the format `q=': '`. The `q=` part signifies a query, and the condition goes inside the single quotes. The value can be a literal value or it can be `*` to signify that any value exists for the give field. diff --git a/content/v1.10.x/how-to-guides/data-quality-observability/observability/alerts.md b/content/v1.10.x/how-to-guides/data-quality-observability/observability/alerts.md index 894ec44b1..fdfcf6bfe 100644 --- a/content/v1.10.x/how-to-guides/data-quality-observability/observability/alerts.md +++ b/content/v1.10.x/how-to-guides/data-quality-observability/observability/alerts.md @@ -33,7 +33,7 @@ The first will be to select a source. For data quality you have 2 relevant optio /%} -### Step 2 - Select a Filtering Conditon (optional) +### Step 2 - Select a Filtering Condition (optional) **Note:** if you do not set any filter the alert will apply to all test cases or test suite. You can filter alerts based on specific condition to narrow down which test suite/test case should trigger an alert. This is interesting for user to dispatch alerts to different channels/users. 
@@ -44,7 +44,7 @@ You can filter alerts based on specific condition to narrow down which test suit caption="Alerts Menu" /%} -### Step 3 - Select a Triggering Conditon +### Step 3 - Select a Triggering Condition Trigger section will allow you set the condition for which an alert should be triggered {% image diff --git a/content/v1.10.x/how-to-guides/data-quality-observability/profiler/profiler-workflow.md b/content/v1.10.x/how-to-guides/data-quality-observability/profiler/profiler-workflow.md index 333639558..136d8fef6 100644 --- a/content/v1.10.x/how-to-guides/data-quality-observability/profiler/profiler-workflow.md +++ b/content/v1.10.x/how-to-guides/data-quality-observability/profiler/profiler-workflow.md @@ -93,7 +93,7 @@ Set the number of rows to ingest when Ingest Sample Data toggle is on. Defaults Number of thread to use when computing metrics for the profiler. For Snowflake users we recommend setting it to 1. There is a known issue with one of the dependency (`snowflake-connector-python`) affecting projects with certain environments. **Timeout in Seconds (Optional)** -This will set the duration a profiling job against a table should wait before interrupting its execution and moving on to profiling the next table. It is important to note that the profiler will wait for the hanging query to terminiate before killing the execution. If there is a risk for your profiling job to hang, it is important to also set a query/connection timeout on your database engine. The default value for the profiler timeout is 12-hours. +This will set the duration a profiling job against a table should wait before interrupting its execution and moving on to profiling the next table. It is important to note that the profiler will wait for the hanging query to terminate before killing the execution. If there is a risk for your profiling job to hang, it is important to also set a query/connection timeout on your database engine. The default value for the profiler timeout is 12-hours. ### 3. Schedule and Deploy After clicking Next, you will be redirected to the Scheduling form. This will be the same as the Metadata and Usage Ingestions. Select your desired schedule and click on Deploy to find the usage pipeline being added to the Service Ingestions. diff --git a/content/v1.10.x/how-to-guides/data-quality-observability/quality/tests-yaml.md b/content/v1.10.x/how-to-guides/data-quality-observability/quality/tests-yaml.md index 87c173caf..870cbd3fc 100644 --- a/content/v1.10.x/how-to-guides/data-quality-observability/quality/tests-yaml.md +++ b/content/v1.10.x/how-to-guides/data-quality-observability/quality/tests-yaml.md @@ -316,12 +316,12 @@ Integrity ``` ### Table Custom SQL Test -Write you own SQL test. When writting your query you can use 2 strategies: -- `ROWS` (default): expects the query to be written as `SELECT , FROM WHERE `. **Note** if your query returns a large amount of rows it might cause an "Out Of Memeory" error. In this case we recomend you to use the `COUNT` strategy. +Write you own SQL test. When writing your query you can use 2 strategies: +- `ROWS` (default): expects the query to be written as `SELECT , FROM WHERE `. **Note** if your query returns a large amount of rows it might cause an "Out Of Memory" error. In this case we recommend you to use the `COUNT` strategy. - `COUNT`: expects the query to be written as `SELECT COUNT() FROM WHERE `. 
**How to use the Threshold Parameter?** -The threshold allows you to define a limit for which you test should pass or fail - by defaut this number is 0. For example if my custom SQL query test returns 10 rows (or a COUNT value of 10) and my threshold is 5 the test will fail. If I update my threshold to 11 on my next run my test will pass. +The threshold allows you to define a limit for which you test should pass or fail - by default this number is 0. For example if my custom SQL query test returns 10 rows (or a COUNT value of 10) and my threshold is 5 the test will fail. If I update my threshold to 11 on my next run my test will pass. {% note %} @@ -571,7 +571,7 @@ Accuracy **Properties** -* `column`: the colummn that will be used to chech the table freshness +* `column`: the column that will be used to chech the table freshness * `timeSinceUpdate`: (in seconds) The data is expected to be updated within this number of seconds. If the time since the last update is greater than this value, the test will fail. **Behavior** @@ -1520,7 +1520,7 @@ Accuracy ### Column Values To Be At Expected Location Validate the reference value for a column is a the expected geographic location -> Data will be temporarely stored in memory while the test case is running to validate the location. Not data will be permanently stored. +> Data will be temporarily stored in memory while the test case is running to validate the location. Not data will be permanently stored. > France is the only supported location at this time. To add any additional location please reach out to the team in our slack support channel **Dimension**: @@ -1528,7 +1528,7 @@ Accuracy **Properties** -* `locationReferenceType`: the type of location refernce `CITY` or `POSTAL_CODE` +* `locationReferenceType`: the type of location reference `CITY` or `POSTAL_CODE` * `longitudeColumnName`: longitude column name * `latitudeColumnName`: latitude column name * `radius`: radius in meter from which the location can be from the expected lat/long -- acts as a buffer diff --git a/content/v1.11.x-SNAPSHOT/connectors/dashboard/domo-dashboard/yaml.md b/content/v1.11.x-SNAPSHOT/connectors/dashboard/domo-dashboard/yaml.md index 2cb0e6a69..e6c100ce2 100644 --- a/content/v1.11.x-SNAPSHOT/connectors/dashboard/domo-dashboard/yaml.md +++ b/content/v1.11.x-SNAPSHOT/connectors/dashboard/domo-dashboard/yaml.md @@ -23,7 +23,7 @@ Configure and schedule DomoDashboard metadata and profiler workflows from the Op ## Requirements -**Note:** For metadata ingestion, kindly make sure add atleast `dashboard` scopes to the clientId provided. +**Note:** For metadata ingestion, kindly make sure add at least `dashboard` scopes to the clientId provided. Question related to scopes, click [here](https://developer.domo.com/portal/1845fc11bbe5d-api-authentication). ### Python Requirements diff --git a/content/v1.11.x-SNAPSHOT/connectors/database/bigquery/index.md b/content/v1.11.x-SNAPSHOT/connectors/database/bigquery/index.md index 9a401ad24..7548809df 100644 --- a/content/v1.11.x-SNAPSHOT/connectors/database/bigquery/index.md +++ b/content/v1.11.x-SNAPSHOT/connectors/database/bigquery/index.md @@ -1,6 +1,6 @@ --- title: BigQuery | OpenMetadata Connector Setup & Integration Guide -description: Connect BigQuery to OpenMetadata seamlessly with our comprehensive database connector guide. Setup instructions, configuration tips, and metadata extrac... +description: Connect BigQuery to OpenMetadata seamlessly with our comprehensive database connector guide. 
Setup instructions, configuration tips, and metadata extract... slug: /connectors/database/bigquery --- diff --git a/content/v1.11.x-SNAPSHOT/connectors/database/cassandra/index.md b/content/v1.11.x-SNAPSHOT/connectors/database/cassandra/index.md index 5884b4f88..a2cf86ff3 100644 --- a/content/v1.11.x-SNAPSHOT/connectors/database/cassandra/index.md +++ b/content/v1.11.x-SNAPSHOT/connectors/database/cassandra/index.md @@ -48,7 +48,7 @@ To extract metadata using the Cassandra connector, ensure the user in the connec #### Connection Details - **Username**: Username to connect to Cassandra. This user must have the necessary permissions to perform metadata extraction and table queries. -- **Host Port**: When using the `cassandra` connecion schema, the hostPort parameter specifies the host and port of the Cassandra. This should be specified as a string in the format `hostname:port`. E.g., `localhost:9042`.- **databaseName**: Optional name to give to the database in OpenMetadata. If left blank, we will use default as the database name. +- **Host Port**: When using the `cassandra` connection schema, the hostPort parameter specifies the host and port of the Cassandra. This should be specified as a string in the format `hostname:port`. E.g., `localhost:9042`.- **databaseName**: Optional name to give to the database in OpenMetadata. If left blank, we will use default as the database name. **Auth Type**: Following authentication types are supported: 1. **Basic Authentication**: diff --git a/content/v1.11.x-SNAPSHOT/connectors/database/cassandra/yaml.md b/content/v1.11.x-SNAPSHOT/connectors/database/cassandra/yaml.md index 44287a553..32d94c54c 100644 --- a/content/v1.11.x-SNAPSHOT/connectors/database/cassandra/yaml.md +++ b/content/v1.11.x-SNAPSHOT/connectors/database/cassandra/yaml.md @@ -73,7 +73,7 @@ This is a sample config for Cassandra: {% codeInfo srNumber=3 %} -**hostPort**: When using the `cassandra` connecion schema, the hostPort parameter specifies the host and port of the Cassandra. This should be specified as a string in the format `hostname:port`. E.g., `localhost:9042`. +**hostPort**: When using the `cassandra` connection schema, the hostPort parameter specifies the host and port of the Cassandra. This should be specified as a string in the format `hostname:port`. E.g., `localhost:9042`. {% /codeInfo %} diff --git a/content/v1.11.x-SNAPSHOT/connectors/database/domo-database/yaml.md b/content/v1.11.x-SNAPSHOT/connectors/database/domo-database/yaml.md index 8cfbcf83b..6a441fe48 100644 --- a/content/v1.11.x-SNAPSHOT/connectors/database/domo-database/yaml.md +++ b/content/v1.11.x-SNAPSHOT/connectors/database/domo-database/yaml.md @@ -26,7 +26,7 @@ Configure and schedule DomoDatabase metadata and profiler workflows from the Ope **Note:** -For metadata ingestion, kindly make sure add atleast `data` scopes to the clientId provided. +For metadata ingestion, kindly make sure add at least `data` scopes to the clientId provided. Question related to scopes, click [here](https://developer.domo.com/portal/1845fc11bbe5d-api-authentication). diff --git a/content/v1.11.x-SNAPSHOT/connectors/database/mongodb/index.md b/content/v1.11.x-SNAPSHOT/connectors/database/mongodb/index.md index b122d8f11..5a4d578a2 100644 --- a/content/v1.11.x-SNAPSHOT/connectors/database/mongodb/index.md +++ b/content/v1.11.x-SNAPSHOT/connectors/database/mongodb/index.md @@ -65,7 +65,7 @@ For a complete guide on managing secrets in hybrid setups, see the [Hybrid Inges - **Username**: Username to connect to Mongodb. 
This user must have access to perform `find` operation on collection and `listCollection` operations on database available in MongoDB. - **Password**: Password to connect to MongoDB. -- **Host Port**: When using the `mongodb` connecion schema, the hostPort parameter specifies the host and port of the MongoDB. This should be specified as a string in the format `hostname:port`. E.g., `localhost:27017`. When using the `mongodb+srv` connection schema, the hostPort parameter specifies the host and port of the MongoDB. This should be specified as a string in the format `hostname`. E.g., `cluster0-abcde.mongodb.net`. +- **Host Port**: When using the `mongodb` connection schema, the hostPort parameter specifies the host and port of the MongoDB. This should be specified as a string in the format `hostname:port`. E.g., `localhost:27017`. When using the `mongodb+srv` connection schema, the hostPort parameter specifies the host and port of the MongoDB. This should be specified as a string in the format `hostname`. E.g., `cluster0-abcde.mongodb.net`. - **databaseName**: Optional name to give to the database in OpenMetadata. If left blank, we will use default as the database name. Using Atlas? Follow [this guide](https://www.mongodb.com/docs/guides/atlas/connection-string/) to get the connection string. diff --git a/content/v1.11.x-SNAPSHOT/connectors/database/mongodb/yaml.md b/content/v1.11.x-SNAPSHOT/connectors/database/mongodb/yaml.md index 3751ae333..ce020a651 100644 --- a/content/v1.11.x-SNAPSHOT/connectors/database/mongodb/yaml.md +++ b/content/v1.11.x-SNAPSHOT/connectors/database/mongodb/yaml.md @@ -75,7 +75,7 @@ This is a sample config for MongoDB: {% codeInfo srNumber=3 %} -**hostPort**: When using the `mongodb` connecion schema, the hostPort parameter specifies the host and port of the MongoDB. This should be specified as a string in the format `hostname:port`. E.g., `localhost:27017`. When using the `mongodb+srv` connection schema, the hostPort parameter specifies the host and port of the MongoDB. This should be specified as a string in the format `hostname`. E.g., `cluster0-abcde.mongodb.net`. +**hostPort**: When using the `mongodb` connection schema, the hostPort parameter specifies the host and port of the MongoDB. This should be specified as a string in the format `hostname:port`. E.g., `localhost:27017`. When using the `mongodb+srv` connection schema, the hostPort parameter specifies the host and port of the MongoDB. This should be specified as a string in the format `hostname`. E.g., `cluster0-abcde.mongodb.net`. Using Atlas? Follow [this guide](https://www.mongodb.com/docs/guides/atlas/connection-string/) to get the connection string. diff --git a/content/v1.11.x-SNAPSHOT/connectors/database/redshift/index.md b/content/v1.11.x-SNAPSHOT/connectors/database/redshift/index.md index a9ca1fbe7..a52ce3e0c 100644 --- a/content/v1.11.x-SNAPSHOT/connectors/database/redshift/index.md +++ b/content/v1.11.x-SNAPSHOT/connectors/database/redshift/index.md @@ -72,7 +72,7 @@ For the usage and lineage workflow, the user will need `SELECT` privilege on `ST } /%} -It is recommmended to exclude the schema "information_schema" from the metadata ingestion as it contains system tables and views. +It is recommended to exclude the schema "information_schema" from the metadata ingestion as it contains system tables and views. 
{% stepsContainer %} {% extraContent parentTagName="stepsContainer" %} diff --git a/content/v1.11.x-SNAPSHOT/connectors/database/redshift/troubleshooting.md b/content/v1.11.x-SNAPSHOT/connectors/database/redshift/troubleshooting.md index 72596c64f..053cbdbd1 100644 --- a/content/v1.11.x-SNAPSHOT/connectors/database/redshift/troubleshooting.md +++ b/content/v1.11.x-SNAPSHOT/connectors/database/redshift/troubleshooting.md @@ -23,7 +23,7 @@ src="/images/v1.11/connectors/redshift/service-connection-arguments.png" alt="Configure service connection" caption="Configure the service connection by filling the form" /%} -### Metdata Ingestion Failure +### Metadata Ingestion Failure If your metadata ingesiton fails and you have errors like: diff --git a/content/v1.11.x-SNAPSHOT/connectors/database/redshift/yaml.md b/content/v1.11.x-SNAPSHOT/connectors/database/redshift/yaml.md index 12e579d38..0b1112242 100644 --- a/content/v1.11.x-SNAPSHOT/connectors/database/redshift/yaml.md +++ b/content/v1.11.x-SNAPSHOT/connectors/database/redshift/yaml.md @@ -68,7 +68,7 @@ The workflow is modeled around the following **Note:** During the metadata ingestion for redshift, the tables in which the distribution style i.e `DISTSTYLE` is not `AUTO` will be marked as partitioned tables -It is recommmended to exclude the schema "information_schema" from the metadata ingestion as it contains system tables and views. +It is recommended to exclude the schema "information_schema" from the metadata ingestion as it contains system tables and views. ### 1. Define the YAML Config diff --git a/content/v1.11.x-SNAPSHOT/connectors/pipeline/domo-pipeline/yaml.md b/content/v1.11.x-SNAPSHOT/connectors/pipeline/domo-pipeline/yaml.md index eb0500cc5..91e994776 100644 --- a/content/v1.11.x-SNAPSHOT/connectors/pipeline/domo-pipeline/yaml.md +++ b/content/v1.11.x-SNAPSHOT/connectors/pipeline/domo-pipeline/yaml.md @@ -24,7 +24,7 @@ Configure and schedule Domo Pipeline metadata and profiler workflows from the Op ## Requirements -**Note:** For metadata ingestion, kindly make sure add atleast `data` scopes to the clientId provided. +**Note:** For metadata ingestion, kindly make sure add at least `data` scopes to the clientId provided. Question related to scopes, click [here](https://developer.domo.com/portal/1845fc11bbe5d-api-authentication). ### Python Requirements diff --git a/content/v1.11.x-SNAPSHOT/connectors/troubleshoot/index.md b/content/v1.11.x-SNAPSHOT/connectors/troubleshoot/index.md index dbf53a6ba..b85bb4e15 100644 --- a/content/v1.11.x-SNAPSHOT/connectors/troubleshoot/index.md +++ b/content/v1.11.x-SNAPSHOT/connectors/troubleshoot/index.md @@ -177,8 +177,8 @@ In Connection details section page click on Create a personal access token. {% image src="/images/v1.11/connectors/databricks/Open-create-tocken-page.png" -alt="Open create tocken" -caption="Open create tocken" /%} +alt="Open create token" +caption="Open create token" /%} @@ -187,8 +187,8 @@ Now In this page you can create new `token`. 
{% image src="/images/v1.11/connectors/databricks/Generate-token.png" -alt="Generate tocken" -caption="Generate tocken" /%} +alt="Generate token" +caption="Generate token" /%} ## Domo Database @@ -395,7 +395,7 @@ src="/images/v1.11/connectors/redshift/service-connection-arguments.png" alt="Configure service connection" caption="Configure the service connection by filling the form" /%} -### Metdata Ingestion Failure +### Metadata Ingestion Failure If your metadata ingesiton fails and you have errors like: diff --git a/content/v1.11.x-SNAPSHOT/deployment/azure-passwordless-auth.md b/content/v1.11.x-SNAPSHOT/deployment/azure-passwordless-auth.md index a89745f17..eb10d1fc6 100644 --- a/content/v1.11.x-SNAPSHOT/deployment/azure-passwordless-auth.md +++ b/content/v1.11.x-SNAPSHOT/deployment/azure-passwordless-auth.md @@ -7,7 +7,7 @@ collate: false # Azure - Enable Passwordless Database Backend Connection By Default, OpenMetadata supports basic authentication when connecting to MySQL/PostgreSQL as Database backend. With Azure, you can enhance the security for configuring Database configurations other the basic authentication mechanism. -This guide will help you setup the application to use passwordless approach for Azure PaaS Databases (preferrably [Azure Database for PostgreSQL - Flexible Server](https://learn.microsoft.com/en-us/azure/postgresql/flexible-server/service-overview) and [Azure Database for MySQL - Flexible Server](https://learn.microsoft.com/en-us/azure/mysql/flexible-server/overview)). +This guide will help you setup the application to use passwordless approach for Azure PaaS Databases (preferably [Azure Database for PostgreSQL - Flexible Server](https://learn.microsoft.com/en-us/azure/postgresql/flexible-server/service-overview) and [Azure Database for MySQL - Flexible Server](https://learn.microsoft.com/en-us/azure/mysql/flexible-server/overview)). # Prerequisites diff --git a/content/v1.11.x-SNAPSHOT/deployment/kubernetes/aks.md b/content/v1.11.x-SNAPSHOT/deployment/kubernetes/aks.md index f9c25630a..8624337a6 100644 --- a/content/v1.11.x-SNAPSHOT/deployment/kubernetes/aks.md +++ b/content/v1.11.x-SNAPSHOT/deployment/kubernetes/aks.md @@ -167,7 +167,7 @@ kubectl apply -f permissions_pod.yaml helm repo add open-metadata https://helm.open-metadata.org/ ``` #### Create secrets -It is recommeded to use external database and search for production deplyoments. The following implementation uses external postgresql DB from Azure Database. Any of the popular databases can be used. The default implementation uses mysql. +It is recommended to use external database and search for production deployments. The following implementation uses external postgresql DB from Azure Database. Any of the popular databases can be used. The default implementation uses mysql. 
```azure-cli kubectl create secret generic airflow-secrets \ diff --git a/content/v1.11.x-SNAPSHOT/deployment/minimum-requirements.md b/content/v1.11.x-SNAPSHOT/deployment/minimum-requirements.md index e5641c277..00d02d162 100644 --- a/content/v1.11.x-SNAPSHOT/deployment/minimum-requirements.md +++ b/content/v1.11.x-SNAPSHOT/deployment/minimum-requirements.md @@ -35,7 +35,7 @@ Our minimum specs recommendation for ElasticSearch / OpenSearch deployment is - 2 vCPUs - 8 GiB Memory - 100 GiB Storage (per node) -- Master / Worker Nodes with atleast 1 Master and 2 Worker Nodes +- Master / Worker Nodes with at least 1 Master and 2 Worker Nodes ### Software Requirements diff --git a/content/v1.11.x-SNAPSHOT/deployment/secrets-manager/supported-implementations/azure-key-vault/index.md b/content/v1.11.x-SNAPSHOT/deployment/secrets-manager/supported-implementations/azure-key-vault/index.md index 6df556e0b..45ade8eb7 100644 --- a/content/v1.11.x-SNAPSHOT/deployment/secrets-manager/supported-implementations/azure-key-vault/index.md +++ b/content/v1.11.x-SNAPSHOT/deployment/secrets-manager/supported-implementations/azure-key-vault/index.md @@ -20,10 +20,10 @@ for the non-managed follow only the steps related to the Airflow server and CLI. 2. Inside the App Registration, go to `Certificates & Secrets` and create a `Client secret`. Note down the `Value`, it will be our `clientSecret` configuration. 3. From the App Registration overview page, note down the `Application (client) ID` and the `Directory (tenant) ID`. -#### Managed Identity (recommnded) +#### Managed Identity (recommended) -1. In your Azure subscription create [Manged Identity](https://learn.microsoft.com/en-us/entra/identity/managed-identities-azure-resources/overview) -2. Use this created identity - for AKS users this means you need to use [Pod Identity](https://learn.microsoft.com/en-us/azure/aks/use-azure-ad-pod-identity) or [Workload Identity (recommnded)](https://learn.microsoft.com/en-us/azure/aks/workload-identity-overview?tabs=dotnet). +1. In your Azure subscription create [Managed Identity](https://learn.microsoft.com/en-us/entra/identity/managed-identities-azure-resources/overview) +2. Use this created identity - for AKS users this means you need to use [Pod Identity](https://learn.microsoft.com/en-us/azure/aks/use-azure-ad-pod-identity) or [Workload Identity (recommended)](https://learn.microsoft.com/en-us/azure/aks/workload-identity-overview?tabs=dotnet). 
{% note %} diff --git a/content/v1.11.x-SNAPSHOT/deployment/security/jwt-troubleshooting.md b/content/v1.11.x-SNAPSHOT/deployment/security/jwt-troubleshooting.md index dc2c343ed..c920c6989 100644 --- a/content/v1.11.x-SNAPSHOT/deployment/security/jwt-troubleshooting.md +++ b/content/v1.11.x-SNAPSHOT/deployment/security/jwt-troubleshooting.md @@ -6,7 +6,7 @@ collate: false --- # JWT Troubleshooting -Add the `{domain}:{port}/api/v1/sytem/config/jwks` in the list of publicKeys +Add the `{domain}:{port}/api/v1/system/config/jwks` in the list of publicKeys ```yaml authentication: diff --git a/content/v1.11.x-SNAPSHOT/developers/contribute/developing-a-new-connector/apply-ui-changes.md b/content/v1.11.x-SNAPSHOT/developers/contribute/developing-a-new-connector/apply-ui-changes.md index 3d43b7224..27b5d423d 100644 --- a/content/v1.11.x-SNAPSHOT/developers/contribute/developing-a-new-connector/apply-ui-changes.md +++ b/content/v1.11.x-SNAPSHOT/developers/contribute/developing-a-new-connector/apply-ui-changes.md @@ -128,7 +128,7 @@ In this section, we provide guides and references to use the MySQL connector. To extract metadata the user used in the connection needs to have access to the `INFORMATION_SCHEMA`. By default, a user can see only the rows in the `INFORMATION_SCHEMA` that correspond to objects for which the user has the proper access privileges. ~~~SQL --- Create user. If is ommited, defaults to '%' +-- Create user. If is omitted, defaults to '%' -- More details https://dev.mysql.com/doc/refman/8.0/en/create-user.html CREATE USER ''[@''] IDENTIFIED BY ''; diff --git a/content/v1.11.x-SNAPSHOT/developers/contribute/developing-a-new-connector/develop-ingestion-code.md b/content/v1.11.x-SNAPSHOT/developers/contribute/developing-a-new-connector/develop-ingestion-code.md index e2cd41ec0..bce285f6c 100644 --- a/content/v1.11.x-SNAPSHOT/developers/contribute/developing-a-new-connector/develop-ingestion-code.md +++ b/content/v1.11.x-SNAPSHOT/developers/contribute/developing-a-new-connector/develop-ingestion-code.md @@ -88,7 +88,7 @@ From the Service Topology you can understand what methods you need to implement: Can be found in [`ingestion/src/metadata/ingestion/source/database/database_service.py`](https://github.com/open-metadata/OpenMetadata/blob/main/ingestion/src/metadata/ingestion/source/database/database_service.py) {%inlineCallout icon="description" bold="OpenMetadata 1.6.0 or later" href="/deployment"%} -Starting from 1.6.0 the OpenMetadata Ingestion Framewotk is using a ServiceSpec specificaiton +Starting from 1.6.0 the OpenMetadata Ingestion Framework is using a ServiceSpec specification in order to define the entrypoints for the ingestion process. {%/inlineCallout%} diff --git a/content/v1.11.x-SNAPSHOT/developers/contribute/developing-a-new-connector/test-it.md b/content/v1.11.x-SNAPSHOT/developers/contribute/developing-a-new-connector/test-it.md index a206bd8d7..cc9cbacf8 100644 --- a/content/v1.11.x-SNAPSHOT/developers/contribute/developing-a-new-connector/test-it.md +++ b/content/v1.11.x-SNAPSHOT/developers/contribute/developing-a-new-connector/test-it.md @@ -5,7 +5,7 @@ slug: /developers/contribute/developing-a-new-connector/test-it # Test It -In order to test your new connector you need to run `make generate` from the project's root in order to generate the propert Python Classes from the JSON Schemas you created and modified.
+In order to test your new connector you need to run `make generate` from the project's root in order to generate the proper Python Classes from the JSON Schemas you created and modified. ## Unit Tests @@ -23,7 +23,7 @@ This could be slow and in order to iterate faster you could just run the tests y In order to test the connector using the CLI you first need to have the OpenMetadata stack running locally. The easiest way to do is to check how to do it [here](/developers/contribute/build-code-and-run-tests). -With it up and running you can install the ingestion pacakge locally and use the CLI directly: +With it up and running you can install the ingestion package locally and use the CLI directly: ```bash metadata ingest -c {your_yaml_file} diff --git a/content/v1.11.x-SNAPSHOT/how-to-guides/data-collaboration/tasks.md b/content/v1.11.x-SNAPSHOT/how-to-guides/data-collaboration/tasks.md index 39d1495b3..277840c2e 100644 --- a/content/v1.11.x-SNAPSHOT/how-to-guides/data-collaboration/tasks.md +++ b/content/v1.11.x-SNAPSHOT/how-to-guides/data-collaboration/tasks.md @@ -7,7 +7,7 @@ slug: /how-to-guides/data-collaboration/tasks # Create Tasks Tasks are an extension to the Conversation Threads feature where users can create tasks to -request to create or update description or tags for a data asset. Tasks are assgined to the owner of the data asset by default. If there are no owners, the task can be assigned to an appropriate user or team. +request to create or update description or tags for a data asset. Tasks are assigned to the owner of the data asset by default. If there are no owners, the task can be assigned to an appropriate user or team. {% image src="/images/v1.11/how-to-guides/collaboration/task.webp" diff --git a/content/v1.11.x-SNAPSHOT/how-to-guides/data-governance/automation/index.md b/content/v1.11.x-SNAPSHOT/how-to-guides/data-governance/automation/index.md index 90a272cdf..5c9463a9b 100644 --- a/content/v1.11.x-SNAPSHOT/how-to-guides/data-governance/automation/index.md +++ b/content/v1.11.x-SNAPSHOT/how-to-guides/data-governance/automation/index.md @@ -264,4 +264,4 @@ Note that this automation, the ML Tagging, will be deprecated in future releases - **Propagate Metadata Thoughtfully**: When propagating metadata via lineage, make sure that the source metadata is correct before applying it across multiple datasets. - **Start with Controlled Propagation**: For complex and large lineage trees, begin the propagation with a limited propagation depth (e.g., 2-3 levels/depth) and gradually increase as needed to avoid unintended widespread changes. - **Understand Path-Aware Depth Behavior**: In complex lineage with multiple parent paths, remember that propagation depth is calculated separately for each path from each root entity. This ensures precise control over which upstream sources contribute metadata to downstream assets. -- **Set Up Stop Conditions for Critical Data**: Cofigure strategic stop conditions around critical ownership boundaries or sensitive data boundaries (Tags- PII, Confidential) to prevent accidental metadata overwrites. +- **Set Up Stop Conditions for Critical Data**: Configure strategic stop conditions around critical ownership boundaries or sensitive data boundaries (Tags- PII, Confidential) to prevent accidental metadata overwrites.
diff --git a/content/v1.11.x-SNAPSHOT/how-to-guides/data-governance/workflows/elements/triggers/periodic-batch-entity-trigger.md b/content/v1.11.x-SNAPSHOT/how-to-guides/data-governance/workflows/elements/triggers/periodic-batch-entity-trigger.md index 25af1b43c..63944fe32 100644 --- a/content/v1.11.x-SNAPSHOT/how-to-guides/data-governance/workflows/elements/triggers/periodic-batch-entity-trigger.md +++ b/content/v1.11.x-SNAPSHOT/how-to-guides/data-governance/workflows/elements/triggers/periodic-batch-entity-trigger.md @@ -7,7 +7,7 @@ collate: true # Governance Workflows - Periodic Batch Entity Trigger -The **Periodic Batch Entity Trigger** enables actions to be triggered on a periodic schedule, processing a batch of entitites at a time. +The **Periodic Batch Entity Trigger** enables actions to be triggered on a periodic schedule, processing a batch of entities at a time. This type of trigger is useful for automating regular workflows that need to run on a schedule. ## Configuration diff --git a/content/v1.11.x-SNAPSHOT/how-to-guides/data-insights/custom-dashboard.md b/content/v1.11.x-SNAPSHOT/how-to-guides/data-insights/custom-dashboard.md index e74ad76ec..476295b97 100644 --- a/content/v1.11.x-SNAPSHOT/how-to-guides/data-insights/custom-dashboard.md +++ b/content/v1.11.x-SNAPSHOT/how-to-guides/data-insights/custom-dashboard.md @@ -98,7 +98,7 @@ You can choose the field name from the fields dropdown from the chart by functio Remember, spaces in field names are replaced with a period (.), and the key should be in lower camel case (meaning the first word is lowercase, and each subsequent word starts with an uppercase letter). -**Query**: The query is to filter down the result to plot, use the format `q=': '`. The `q=` part signifies a query, adn the condition goes inside the single quotes. +**Query**: The query is to filter down the result to plot, use the format `q=': '`. The `q=` part signifies a query, and the condition goes inside the single quotes. The value can be a literal value or it can be `*` to signify that any value exists for the give field. diff --git a/content/v1.11.x-SNAPSHOT/how-to-guides/data-quality-observability/observability/alerts.md b/content/v1.11.x-SNAPSHOT/how-to-guides/data-quality-observability/observability/alerts.md index 6c5be3d93..3bb1e4397 100644 --- a/content/v1.11.x-SNAPSHOT/how-to-guides/data-quality-observability/observability/alerts.md +++ b/content/v1.11.x-SNAPSHOT/how-to-guides/data-quality-observability/observability/alerts.md @@ -33,7 +33,7 @@ The first will be to select a source. For data quality you have 2 relevant optio /%} -### Step 2 - Select a Filtering Conditon (optional) +### Step 2 - Select a Filtering Condition (optional) **Note:** if you do not set any filter the alert will apply to all test cases or test suite. You can filter alerts based on specific condition to narrow down which test suite/test case should trigger an alert. This is interesting for user to dispatch alerts to different channels/users. 
@@ -44,7 +44,7 @@ You can filter alerts based on specific condition to narrow down which test suit caption="Alerts Menu" /%} -### Step 3 - Select a Triggering Conditon +### Step 3 - Select a Triggering Condition Trigger section will allow you set the condition for which an alert should be triggered {% image diff --git a/content/v1.11.x-SNAPSHOT/how-to-guides/data-quality-observability/profiler/profiler-workflow.md b/content/v1.11.x-SNAPSHOT/how-to-guides/data-quality-observability/profiler/profiler-workflow.md index 5155b478d..6d9e0ad95 100644 --- a/content/v1.11.x-SNAPSHOT/how-to-guides/data-quality-observability/profiler/profiler-workflow.md +++ b/content/v1.11.x-SNAPSHOT/how-to-guides/data-quality-observability/profiler/profiler-workflow.md @@ -93,7 +93,7 @@ Set the number of rows to ingest when Ingest Sample Data toggle is on. Defaults Number of thread to use when computing metrics for the profiler. For Snowflake users we recommend setting it to 1. There is a known issue with one of the dependency (`snowflake-connector-python`) affecting projects with certain environments. **Timeout in Seconds (Optional)** -This will set the duration a profiling job against a table should wait before interrupting its execution and moving on to profiling the next table. It is important to note that the profiler will wait for the hanging query to terminiate before killing the execution. If there is a risk for your profiling job to hang, it is important to also set a query/connection timeout on your database engine. The default value for the profiler timeout is 12-hours. +This will set the duration a profiling job against a table should wait before interrupting its execution and moving on to profiling the next table. It is important to note that the profiler will wait for the hanging query to terminate before killing the execution. If there is a risk for your profiling job to hang, it is important to also set a query/connection timeout on your database engine. The default value for the profiler timeout is 12-hours. ### 3. Schedule and Deploy After clicking Next, you will be redirected to the Scheduling form. This will be the same as the Metadata and Usage Ingestions. Select your desired schedule and click on Deploy to find the usage pipeline being added to the Service Ingestions. diff --git a/content/v1.11.x-SNAPSHOT/how-to-guides/data-quality-observability/quality/tests-yaml.md b/content/v1.11.x-SNAPSHOT/how-to-guides/data-quality-observability/quality/tests-yaml.md index 87c173caf..870cbd3fc 100644 --- a/content/v1.11.x-SNAPSHOT/how-to-guides/data-quality-observability/quality/tests-yaml.md +++ b/content/v1.11.x-SNAPSHOT/how-to-guides/data-quality-observability/quality/tests-yaml.md @@ -316,12 +316,12 @@ Integrity ``` ### Table Custom SQL Test -Write you own SQL test. When writting your query you can use 2 strategies: -- `ROWS` (default): expects the query to be written as `SELECT , FROM WHERE `. **Note** if your query returns a large amount of rows it might cause an "Out Of Memeory" error. In this case we recomend you to use the `COUNT` strategy. +Write your own SQL test. When writing your query you can use 2 strategies: +- `ROWS` (default): expects the query to be written as `SELECT , FROM WHERE `. **Note** if your query returns a large amount of rows it might cause an "Out Of Memory" error. In this case we recommend you to use the `COUNT` strategy. - `COUNT`: expects the query to be written as `SELECT COUNT() FROM WHERE `.
**How to use the Threshold Parameter?** -The threshold allows you to define a limit for which you test should pass or fail - by defaut this number is 0. For example if my custom SQL query test returns 10 rows (or a COUNT value of 10) and my threshold is 5 the test will fail. If I update my threshold to 11 on my next run my test will pass. +The threshold allows you to define a limit for which your test should pass or fail - by default this number is 0. For example if my custom SQL query test returns 10 rows (or a COUNT value of 10) and my threshold is 5 the test will fail. If I update my threshold to 11 on my next run my test will pass. {% note %} @@ -571,7 +571,7 @@ Accuracy **Properties** -* `column`: the colummn that will be used to chech the table freshness +* `column`: the column that will be used to check the table freshness * `timeSinceUpdate`: (in seconds) The data is expected to be updated within this number of seconds. If the time since the last update is greater than this value, the test will fail. **Behavior** @@ -1520,7 +1520,7 @@ Accuracy ### Column Values To Be At Expected Location Validate the reference value for a column is a the expected geographic location -> Data will be temporarely stored in memory while the test case is running to validate the location. Not data will be permanently stored. +> Data will be temporarily stored in memory while the test case is running to validate the location. No data will be permanently stored. > France is the only supported location at this time. To add any additional location please reach out to the team in our slack support channel **Dimension**: @@ -1528,7 +1528,7 @@ Accuracy **Properties** -* `locationReferenceType`: the type of location refernce `CITY` or `POSTAL_CODE` +* `locationReferenceType`: the type of location reference `CITY` or `POSTAL_CODE` * `longitudeColumnName`: longitude column name * `latitudeColumnName`: latitude column name * `radius`: radius in meter from which the location can be from the expected lat/long -- acts as a buffer diff --git a/content/v1.11.x-SNAPSHOT/main-concepts/metadata-standard/schemas/email/templateValidationReponse.md b/content/v1.11.x-SNAPSHOT/main-concepts/metadata-standard/schemas/email/templateValidationReponse.md index b752fae19..3d580eb19 100644 --- a/content/v1.11.x-SNAPSHOT/main-concepts/metadata-standard/schemas/email/templateValidationReponse.md +++ b/content/v1.11.x-SNAPSHOT/main-concepts/metadata-standard/schemas/email/templateValidationReponse.md @@ -3,7 +3,7 @@ title: templateValidationReponse slug: /main-concepts/metadata-standard/schemas/email/templatevalidationreponse --- -# Email Template Validation Reponse +# Email Template Validation Response *Schema defining email templates.* diff --git a/content/v1.11.x-SNAPSHOT/main-concepts/metadata-standard/schemas/entity/feed/customProperty.md b/content/v1.11.x-SNAPSHOT/main-concepts/metadata-standard/schemas/entity/feed/customProperty.md index 1e43e63e7..7a297ba89 100644 --- a/content/v1.11.x-SNAPSHOT/main-concepts/metadata-standard/schemas/entity/feed/customProperty.md +++ b/content/v1.11.x-SNAPSHOT/main-concepts/metadata-standard/schemas/entity/feed/customProperty.md @@ -5,7 +5,7 @@ slug: /main-concepts/metadata-standard/schemas/entity/feed/customproperty # CustomPropertyFeedInfo -*This schema defines the custom properties addition/deltion schema on feed.* +*This schema defines the custom properties addition/deletion schema on feed.* ## Properties diff --git
a/content/v1.11.x-SNAPSHOT/main-concepts/metadata-standard/schemas/entity/services/connections/dashboard/qlikSenseConnection.md b/content/v1.11.x-SNAPSHOT/main-concepts/metadata-standard/schemas/entity/services/connections/dashboard/qlikSenseConnection.md index a23d99b6c..4a05429d1 100644 --- a/content/v1.11.x-SNAPSHOT/main-concepts/metadata-standard/schemas/entity/services/connections/dashboard/qlikSenseConnection.md +++ b/content/v1.11.x-SNAPSHOT/main-concepts/metadata-standard/schemas/entity/services/connections/dashboard/qlikSenseConnection.md @@ -10,7 +10,7 @@ slug: /main-concepts/metadata-standard/schemas/entity/services/connections/dashb ## Properties - **`type`**: Service Type. Refer to *#/definitions/qlikSenseType*. Default: `QlikSense`. -- **`displayUrl`** *(string)*: Qlik Sense Base URL, used for genrating dashboard & chat url. +- **`displayUrl`** *(string)*: Qlik Sense Base URL, used for generating dashboard & chat url. - **`hostPort`** *(string)*: URL for the Qlik instance. - **`certificates`** - **`validateHostName`** *(boolean)*: Validate Host Name. Default: `False`. diff --git a/content/v1.11.x-SNAPSHOT/main-concepts/metadata-standard/schemas/metadataIngestion/databaseServiceAutoClassificationPipeline.md b/content/v1.11.x-SNAPSHOT/main-concepts/metadata-standard/schemas/metadataIngestion/databaseServiceAutoClassificationPipeline.md index 8e6c5ca1d..960a9f3ed 100644 --- a/content/v1.11.x-SNAPSHOT/main-concepts/metadata-standard/schemas/metadataIngestion/databaseServiceAutoClassificationPipeline.md +++ b/content/v1.11.x-SNAPSHOT/main-concepts/metadata-standard/schemas/metadataIngestion/databaseServiceAutoClassificationPipeline.md @@ -10,7 +10,7 @@ slug: /main-concepts/metadata-standard/schemas/metadataingestion/databaseservice ## Properties - **`type`**: Pipeline type. Refer to *#/definitions/autoClassificationConfigType*. Default: `AutoClassification`. -- **`classificationFilterPattern`**: Regex to only compute metrics for table that matches the given tag, tiers, gloassary pattern. Refer to *../type/filterPattern.json#/definitions/filterPattern*. +- **`classificationFilterPattern`**: Regex to only compute metrics for table that matches the given tag, tiers, glossary pattern. Refer to *../type/filterPattern.json#/definitions/filterPattern*. - **`schemaFilterPattern`**: Regex to only fetch tables or databases that matches the pattern. Refer to *../type/filterPattern.json#/definitions/filterPattern*. - **`tableFilterPattern`**: Regex exclude tables or databases that matches the pattern. Refer to *../type/filterPattern.json#/definitions/filterPattern*. - **`databaseFilterPattern`**: Regex to only fetch databases that matches the pattern. Refer to *../type/filterPattern.json#/definitions/filterPattern*. diff --git a/content/v1.11.x-SNAPSHOT/main-concepts/metadata-standard/schemas/metadataIngestion/databaseServiceProfilerPipeline.md b/content/v1.11.x-SNAPSHOT/main-concepts/metadata-standard/schemas/metadataIngestion/databaseServiceProfilerPipeline.md index 1cc51f5da..ded755a26 100644 --- a/content/v1.11.x-SNAPSHOT/main-concepts/metadata-standard/schemas/metadataIngestion/databaseServiceProfilerPipeline.md +++ b/content/v1.11.x-SNAPSHOT/main-concepts/metadata-standard/schemas/metadataIngestion/databaseServiceProfilerPipeline.md @@ -11,7 +11,7 @@ slug: /main-concepts/metadata-standard/schemas/metadataingestion/databaseservice - **`type`**: Pipeline type. Refer to *#/definitions/profilerConfigType*. Default: `Profiler`. 
- **`processingEngine`**: Refer to *#/definitions/processingEngine*. -- **`classificationFilterPattern`**: Regex to only compute metrics for table that matches the given tag, tiers, gloassary pattern. Refer to *../type/filterPattern.json#/definitions/filterPattern*. +- **`classificationFilterPattern`**: Regex to only compute metrics for table that matches the given tag, tiers, glossary pattern. Refer to *../type/filterPattern.json#/definitions/filterPattern*. - **`schemaFilterPattern`**: Regex to only fetch tables or databases that matches the pattern. Refer to *../type/filterPattern.json#/definitions/filterPattern*. - **`tableFilterPattern`**: Regex exclude tables or databases that matches the pattern. Refer to *../type/filterPattern.json#/definitions/filterPattern*. - **`databaseFilterPattern`**: Regex to only fetch databases that matches the pattern. Refer to *../type/filterPattern.json#/definitions/filterPattern*. diff --git a/content/v1.11.x-SNAPSHOT/sdk/go/index.md b/content/v1.11.x-SNAPSHOT/sdk/go/index.md index 0499ca32e..cb8db2bec 100644 --- a/content/v1.11.x-SNAPSHOT/sdk/go/index.md +++ b/content/v1.11.x-SNAPSHOT/sdk/go/index.md @@ -24,7 +24,7 @@ To create OpenMetadata Gateway, you will need to establish a connection with *Op * `BaseURL`: The url on which your instance of OpenMetadata is up and running (include the port if you need to e.g. http://localhost:8585). * `APIVersion`: pass an empty string -- this will be `v1` for now. * `Retry`: number of time the request should retry if the status code returned is in `RetryCodes`. Use `0` to use the default value -* `RetryWait`: number of second to wait betwee retries. Pass 0 to use the default value +* `RetryWait`: number of second to wait between retries. Pass 0 to use the default value * `RetryCodes`: HTTP status that will trigger a retry. Pass `nil` to use the default * `AuthTokenMode`: defaults to `Bearer` * `AccessToken`: JWT token use to authenticate the request diff --git a/content/v1.9.x/connectors/dashboard/domo-dashboard/yaml.md b/content/v1.9.x/connectors/dashboard/domo-dashboard/yaml.md index c87990911..b973e08aa 100644 --- a/content/v1.9.x/connectors/dashboard/domo-dashboard/yaml.md +++ b/content/v1.9.x/connectors/dashboard/domo-dashboard/yaml.md @@ -23,7 +23,7 @@ Configure and schedule DomoDashboard metadata and profiler workflows from the Op ## Requirements -**Note:** For metadata ingestion, kindly make sure add atleast `dashboard` scopes to the clientId provided. +**Note:** For metadata ingestion, kindly make sure add at least `dashboard` scopes to the clientId provided. Question related to scopes, click [here](https://developer.domo.com/portal/1845fc11bbe5d-api-authentication). ### Python Requirements diff --git a/content/v1.9.x/connectors/database/bigquery/index.md b/content/v1.9.x/connectors/database/bigquery/index.md index b2977357d..8ee84cb42 100644 --- a/content/v1.9.x/connectors/database/bigquery/index.md +++ b/content/v1.9.x/connectors/database/bigquery/index.md @@ -1,6 +1,6 @@ --- title: BigQuery | OpenMetadata Connector Setup & Integration Guide -description: Connect BigQuery to OpenMetadata seamlessly with our comprehensive database connector guide. Setup instructions, configuration tips, and metadata extrac... +description: Connect BigQuery to OpenMetadata seamlessly with our comprehensive database connector guide. Setup instructions, configuration tips, and metadata extract... 
slug: /connectors/database/bigquery --- diff --git a/content/v1.9.x/connectors/database/cassandra/index.md b/content/v1.9.x/connectors/database/cassandra/index.md index 2a7f9a02f..8ee9d7fdc 100644 --- a/content/v1.9.x/connectors/database/cassandra/index.md +++ b/content/v1.9.x/connectors/database/cassandra/index.md @@ -48,7 +48,7 @@ To extract metadata using the Cassandra connector, ensure the user in the connec #### Connection Details - **Username**: Username to connect to Cassandra. This user must have the necessary permissions to perform metadata extraction and table queries. -- **Host Port**: When using the `cassandra` connecion schema, the hostPort parameter specifies the host and port of the Cassandra. This should be specified as a string in the format `hostname:port`. E.g., `localhost:9042`.- **databaseName**: Optional name to give to the database in OpenMetadata. If left blank, we will use default as the database name. +- **Host Port**: When using the `cassandra` connection schema, the hostPort parameter specifies the host and port of the Cassandra. This should be specified as a string in the format `hostname:port`. E.g., `localhost:9042`.- **databaseName**: Optional name to give to the database in OpenMetadata. If left blank, we will use default as the database name. **Auth Type**: Following authentication types are supported: 1. **Basic Authentication**: diff --git a/content/v1.9.x/connectors/database/cassandra/yaml.md b/content/v1.9.x/connectors/database/cassandra/yaml.md index 85935653c..a70663ba0 100644 --- a/content/v1.9.x/connectors/database/cassandra/yaml.md +++ b/content/v1.9.x/connectors/database/cassandra/yaml.md @@ -73,7 +73,7 @@ This is a sample config for Cassandra: {% codeInfo srNumber=3 %} -**hostPort**: When using the `cassandra` connecion schema, the hostPort parameter specifies the host and port of the Cassandra. This should be specified as a string in the format `hostname:port`. E.g., `localhost:9042`. +**hostPort**: When using the `cassandra` connection schema, the hostPort parameter specifies the host and port of the Cassandra. This should be specified as a string in the format `hostname:port`. E.g., `localhost:9042`. {% /codeInfo %} diff --git a/content/v1.9.x/connectors/database/domo-database/yaml.md b/content/v1.9.x/connectors/database/domo-database/yaml.md index 78fb15a0c..fe69ecc6d 100644 --- a/content/v1.9.x/connectors/database/domo-database/yaml.md +++ b/content/v1.9.x/connectors/database/domo-database/yaml.md @@ -26,7 +26,7 @@ Configure and schedule DomoDatabase metadata and profiler workflows from the Ope **Note:** -For metadata ingestion, kindly make sure add atleast `data` scopes to the clientId provided. +For metadata ingestion, kindly make sure add at least `data` scopes to the clientId provided. Question related to scopes, click [here](https://developer.domo.com/portal/1845fc11bbe5d-api-authentication). diff --git a/content/v1.9.x/connectors/database/mongodb/index.md b/content/v1.9.x/connectors/database/mongodb/index.md index d60622857..f7c39a54c 100644 --- a/content/v1.9.x/connectors/database/mongodb/index.md +++ b/content/v1.9.x/connectors/database/mongodb/index.md @@ -65,7 +65,7 @@ For a complete guide on managing secrets in hybrid setups, see the [Hybrid Inges - **Username**: Username to connect to Mongodb. This user must have access to perform `find` operation on collection and `listCollection` operations on database available in MongoDB. - **Password**: Password to connect to MongoDB. 
-- **Host Port**: When using the `mongodb` connecion schema, the hostPort parameter specifies the host and port of the MongoDB. This should be specified as a string in the format `hostname:port`. E.g., `localhost:27017`. When using the `mongodb+srv` connection schema, the hostPort parameter specifies the host and port of the MongoDB. This should be specified as a string in the format `hostname`. E.g., `cluster0-abcde.mongodb.net`. +- **Host Port**: When using the `mongodb` connection schema, the hostPort parameter specifies the host and port of the MongoDB. This should be specified as a string in the format `hostname:port`. E.g., `localhost:27017`. When using the `mongodb+srv` connection schema, the hostPort parameter specifies the host and port of the MongoDB. This should be specified as a string in the format `hostname`. E.g., `cluster0-abcde.mongodb.net`. - **databaseName**: Optional name to give to the database in OpenMetadata. If left blank, we will use default as the database name. Using Atlas? Follow [this guide](https://www.mongodb.com/docs/guides/atlas/connection-string/) to get the connection string. diff --git a/content/v1.9.x/connectors/database/mongodb/yaml.md b/content/v1.9.x/connectors/database/mongodb/yaml.md index 0c127ebd3..66a19b9b5 100644 --- a/content/v1.9.x/connectors/database/mongodb/yaml.md +++ b/content/v1.9.x/connectors/database/mongodb/yaml.md @@ -75,7 +75,7 @@ This is a sample config for MongoDB: {% codeInfo srNumber=3 %} -**hostPort**: When using the `mongodb` connecion schema, the hostPort parameter specifies the host and port of the MongoDB. This should be specified as a string in the format `hostname:port`. E.g., `localhost:27017`. When using the `mongodb+srv` connection schema, the hostPort parameter specifies the host and port of the MongoDB. This should be specified as a string in the format `hostname`. E.g., `cluster0-abcde.mongodb.net`. +**hostPort**: When using the `mongodb` connection schema, the hostPort parameter specifies the host and port of the MongoDB. This should be specified as a string in the format `hostname:port`. E.g., `localhost:27017`. When using the `mongodb+srv` connection schema, the hostPort parameter specifies the host and port of the MongoDB. This should be specified as a string in the format `hostname`. E.g., `cluster0-abcde.mongodb.net`. Using Atlas? Follow [this guide](https://www.mongodb.com/docs/guides/atlas/connection-string/) to get the connection string. diff --git a/content/v1.9.x/connectors/database/redshift/index.md b/content/v1.9.x/connectors/database/redshift/index.md index 0b67444b7..a95fb638e 100644 --- a/content/v1.9.x/connectors/database/redshift/index.md +++ b/content/v1.9.x/connectors/database/redshift/index.md @@ -72,7 +72,7 @@ For the usage and lineage workflow, the user will need `SELECT` privilege on `ST } /%} -It is recommmended to exclude the schema "information_schema" from the metadata ingestion as it contains system tables and views. +It is recommended to exclude the schema "information_schema" from the metadata ingestion as it contains system tables and views. 
{% stepsContainer %} {% extraContent parentTagName="stepsContainer" %} diff --git a/content/v1.9.x/connectors/database/redshift/troubleshooting.md b/content/v1.9.x/connectors/database/redshift/troubleshooting.md index 1d3cc6f9f..020010561 100644 --- a/content/v1.9.x/connectors/database/redshift/troubleshooting.md +++ b/content/v1.9.x/connectors/database/redshift/troubleshooting.md @@ -23,7 +23,7 @@ src="/images/v1.9/connectors/redshift/service-connection-arguments.png" alt="Configure service connection" caption="Configure the service connection by filling the form" /%} -### Metdata Ingestion Failure +### Metadata Ingestion Failure If your metadata ingesiton fails and you have errors like: diff --git a/content/v1.9.x/connectors/database/redshift/yaml.md b/content/v1.9.x/connectors/database/redshift/yaml.md index a98d473c2..a40b03912 100644 --- a/content/v1.9.x/connectors/database/redshift/yaml.md +++ b/content/v1.9.x/connectors/database/redshift/yaml.md @@ -68,7 +68,7 @@ The workflow is modeled around the following **Note:** During the metadata ingestion for redshift, the tables in which the distribution style i.e `DISTSTYLE` is not `AUTO` will be marked as partitioned tables -It is recommmended to exclude the schema "information_schema" from the metadata ingestion as it contains system tables and views. +It is recommended to exclude the schema "information_schema" from the metadata ingestion as it contains system tables and views. ### 1. Define the YAML Config diff --git a/content/v1.9.x/connectors/pipeline/domo-pipeline/yaml.md b/content/v1.9.x/connectors/pipeline/domo-pipeline/yaml.md index 47211a89e..e1a1c13b1 100644 --- a/content/v1.9.x/connectors/pipeline/domo-pipeline/yaml.md +++ b/content/v1.9.x/connectors/pipeline/domo-pipeline/yaml.md @@ -24,7 +24,7 @@ Configure and schedule Domo Pipeline metadata and profiler workflows from the Op ## Requirements -**Note:** For metadata ingestion, kindly make sure add atleast `data` scopes to the clientId provided. +**Note:** For metadata ingestion, kindly make sure add at least `data` scopes to the clientId provided. Question related to scopes, click [here](https://developer.domo.com/portal/1845fc11bbe5d-api-authentication). ### Python Requirements diff --git a/content/v1.9.x/connectors/troubleshoot/index.md b/content/v1.9.x/connectors/troubleshoot/index.md index dde477246..89638eaea 100644 --- a/content/v1.9.x/connectors/troubleshoot/index.md +++ b/content/v1.9.x/connectors/troubleshoot/index.md @@ -177,8 +177,8 @@ In Connection details section page click on Create a personal access token. {% image src="/images/v1.9/connectors/databricks/Open-create-tocken-page.png" -alt="Open create tocken" -caption="Open create tocken" /%} +alt="Open create token" +caption="Open create token" /%} @@ -187,8 +187,8 @@ Now In this page you can create new `token`. 
{% image src="/images/v1.9/connectors/databricks/Generate-token.png" -alt="Generate tocken" -caption="Generate tocken" /%} +alt="Generate token" +caption="Generate token" /%} ## Domo Database @@ -395,7 +395,7 @@ src="/images/v1.9/connectors/redshift/service-connection-arguments.png" alt="Configure service connection" caption="Configure the service connection by filling the form" /%} -### Metdata Ingestion Failure +### Metadata Ingestion Failure If your metadata ingesiton fails and you have errors like: diff --git a/content/v1.9.x/deployment/azure-passwordless-auth.md b/content/v1.9.x/deployment/azure-passwordless-auth.md index a89745f17..eb10d1fc6 100644 --- a/content/v1.9.x/deployment/azure-passwordless-auth.md +++ b/content/v1.9.x/deployment/azure-passwordless-auth.md @@ -7,7 +7,7 @@ collate: false # Azure - Enable Passwordless Database Backend Connection By Default, OpenMetadata supports basic authentication when connecting to MySQL/PostgreSQL as Database backend. With Azure, you can enhance the security for configuring Database configurations other the basic authentication mechanism. -This guide will help you setup the application to use passwordless approach for Azure PaaS Databases (preferrably [Azure Database for PostgreSQL - Flexible Server](https://learn.microsoft.com/en-us/azure/postgresql/flexible-server/service-overview) and [Azure Database for MySQL - Flexible Server](https://learn.microsoft.com/en-us/azure/mysql/flexible-server/overview)). +This guide will help you setup the application to use passwordless approach for Azure PaaS Databases (preferably [Azure Database for PostgreSQL - Flexible Server](https://learn.microsoft.com/en-us/azure/postgresql/flexible-server/service-overview) and [Azure Database for MySQL - Flexible Server](https://learn.microsoft.com/en-us/azure/mysql/flexible-server/overview)). # Prerequisites diff --git a/content/v1.9.x/deployment/kubernetes/aks.md b/content/v1.9.x/deployment/kubernetes/aks.md index a7c5f6979..4cc29ad3c 100644 --- a/content/v1.9.x/deployment/kubernetes/aks.md +++ b/content/v1.9.x/deployment/kubernetes/aks.md @@ -167,7 +167,7 @@ kubectl apply -f permissions_pod.yaml helm repo add open-metadata https://helm.open-metadata.org/ ``` #### Create secrets -It is recommeded to use external database and search for production deplyoments. The following implementation uses external postgresql DB from Azure Database. Any of the popular databases can be used. The default implementation uses mysql. +It is recommended to use external database and search for production deployments. The following implementation uses external postgresql DB from Azure Database. Any of the popular databases can be used. The default implementation uses mysql. 
```azure-cli kubectl create secret generic airflow-secrets \ diff --git a/content/v1.9.x/deployment/minimum-requirements.md b/content/v1.9.x/deployment/minimum-requirements.md index e5641c277..00d02d162 100644 --- a/content/v1.9.x/deployment/minimum-requirements.md +++ b/content/v1.9.x/deployment/minimum-requirements.md @@ -35,7 +35,7 @@ Our minimum specs recommendation for ElasticSearch / OpenSearch deployment is - 2 vCPUs - 8 GiB Memory - 100 GiB Storage (per node) -- Master / Worker Nodes with atleast 1 Master and 2 Worker Nodes +- Master / Worker Nodes with at least 1 Master and 2 Worker Nodes ### Software Requirements diff --git a/content/v1.9.x/deployment/secrets-manager/supported-implementations/azure-key-vault/index.md b/content/v1.9.x/deployment/secrets-manager/supported-implementations/azure-key-vault/index.md index 6df556e0b..45ade8eb7 100644 --- a/content/v1.9.x/deployment/secrets-manager/supported-implementations/azure-key-vault/index.md +++ b/content/v1.9.x/deployment/secrets-manager/supported-implementations/azure-key-vault/index.md @@ -20,10 +20,10 @@ for the non-managed follow only the steps related to the Airflow server and CLI. 2. Inside the App Registration, go to `Certificates & Secrets` and create a `Client secret`. Note down the `Value`, it will be our `clientSecret` configuration. 3. From the App Registration overview page, note down the `Application (client) ID` and the `Directory (tenant) ID`. -#### Managed Identity (recommnded) +#### Managed Identity (recommended) -1. In your Azure subscription create [Manged Identity](https://learn.microsoft.com/en-us/entra/identity/managed-identities-azure-resources/overview) -2. Use this created identity - for AKS users this means you need to use [Pod Identity](https://learn.microsoft.com/en-us/azure/aks/use-azure-ad-pod-identity) or [Workload Identity (recommnded)](https://learn.microsoft.com/en-us/azure/aks/workload-identity-overview?tabs=dotnet). +1. In your Azure subscription create [Managed Identity](https://learn.microsoft.com/en-us/entra/identity/managed-identities-azure-resources/overview) +2. Use this created identity - for AKS users this means you need to use [Pod Identity](https://learn.microsoft.com/en-us/azure/aks/use-azure-ad-pod-identity) or [Workload Identity (recommended)](https://learn.microsoft.com/en-us/azure/aks/workload-identity-overview?tabs=dotnet). {% note %} diff --git a/content/v1.9.x/deployment/security/jwt-troubleshooting.md b/content/v1.9.x/deployment/security/jwt-troubleshooting.md index 9f1354618..5ee4fd1bf 100644 --- a/content/v1.9.x/deployment/security/jwt-troubleshooting.md +++ b/content/v1.9.x/deployment/security/jwt-troubleshooting.md @@ -6,7 +6,7 @@ collate: false --- # JWT Troubleshooting -Add the `{domain}:{port}/api/v1/sytem/config/jwks` in the list of publicKeys +Add the `{domain}:{port}/api/v1/system/config/jwks` in the list of publicKeys ```yaml authentication: diff --git a/content/v1.9.x/developers/contribute/developing-a-new-connector/apply-ui-changes.md b/content/v1.9.x/developers/contribute/developing-a-new-connector/apply-ui-changes.md index 3d43b7224..27b5d423d 100644 --- a/content/v1.9.x/developers/contribute/developing-a-new-connector/apply-ui-changes.md +++ b/content/v1.9.x/developers/contribute/developing-a-new-connector/apply-ui-changes.md @@ -128,7 +128,7 @@ In this section, we provide guides and references to use the MySQL connector. To extract metadata the user used in the connection needs to have access to the `INFORMATION_SCHEMA`. 
By default, a user can see only the rows in the `INFORMATION_SCHEMA` that correspond to objects for which the user has the proper access privileges. ~~~SQL --- Create user. If is ommited, defaults to '%' +-- Create user. If is omitted, defaults to '%' -- More details https://dev.mysql.com/doc/refman/8.0/en/create-user.html CREATE USER ''[@''] IDENTIFIED BY ''; diff --git a/content/v1.9.x/developers/contribute/developing-a-new-connector/develop-ingestion-code.md b/content/v1.9.x/developers/contribute/developing-a-new-connector/develop-ingestion-code.md index e2cd41ec0..bce285f6c 100644 --- a/content/v1.9.x/developers/contribute/developing-a-new-connector/develop-ingestion-code.md +++ b/content/v1.9.x/developers/contribute/developing-a-new-connector/develop-ingestion-code.md @@ -88,7 +88,7 @@ From the Service Topology you can understand what methods you need to implement: Can be found in [`ingestion/src/metadata/ingestion/source/database/database_service.py`](https://github.com/open-metadata/OpenMetadata/blob/main/ingestion/src/metadata/ingestion/source/database/database_service.py) {%inlineCallout icon="description" bold="OpenMetadata 1.6.0 or later" href="/deployment"%} -Starting from 1.6.0 the OpenMetadata Ingestion Framewotk is using a ServiceSpec specificaiton +Starting from 1.6.0 the OpenMetadata Ingestion Framework is using a ServiceSpec specification in order to define the entrypoints for the ingestion process. {%/inlineCallout%} diff --git a/content/v1.9.x/developers/contribute/developing-a-new-connector/test-it.md b/content/v1.9.x/developers/contribute/developing-a-new-connector/test-it.md index a206bd8d7..cc9cbacf8 100644 --- a/content/v1.9.x/developers/contribute/developing-a-new-connector/test-it.md +++ b/content/v1.9.x/developers/contribute/developing-a-new-connector/test-it.md @@ -5,7 +5,7 @@ slug: /developers/contribute/developing-a-new-connector/test-it # Test It -In order to test your new connector you need to run `make generate` from the project's root in order to generate the propert Python Classes from the JSON Schemas you created and modified. +In order to test your new connector you need to run `make generate` from the project's root in order to generate the proper Python Classes from the JSON Schemas you created and modified. ## Unit Tests @@ -23,7 +23,7 @@ This could be slow and in order to iterate faster you could just run the tests y In order to test the connector using the CLI you first need to have the OpenMetadata stack running locally. The easiest way to do is to check how to do it [here](/developers/contribute/build-code-and-run-tests). -With it up and running you can install the ingestion pacakge locally and use the CLI directly: +With it up and running you can install the ingestion package locally and use the CLI directly: ```bash metadata ingest -c {your_yaml_file} diff --git a/content/v1.9.x/how-to-guides/data-collaboration/tasks.md b/content/v1.9.x/how-to-guides/data-collaboration/tasks.md index 994e2acc2..6c8b3f64a 100644 --- a/content/v1.9.x/how-to-guides/data-collaboration/tasks.md +++ b/content/v1.9.x/how-to-guides/data-collaboration/tasks.md @@ -7,7 +7,7 @@ slug: /how-to-guides/data-collaboration/tasks # Create Tasks Tasks are an extension to the Conversation Threads feature where users can create tasks to -request to create or update description or tags for a data asset. Tasks are assgined to the owner of the data asset by default. If there are no owners, the task can be assigned to an appropriate user or team.
+request to create or update description or tags for a data asset. Tasks are assigned to the owner of the data asset by default. If there are no owners, the task can be assigned to an appropriate user or team. {% image src="/images/v1.9/how-to-guides/collaboration/task.webp" diff --git a/content/v1.9.x/how-to-guides/data-governance/automation/index.md b/content/v1.9.x/how-to-guides/data-governance/automation/index.md index ed306669a..236640980 100644 --- a/content/v1.9.x/how-to-guides/data-governance/automation/index.md +++ b/content/v1.9.x/how-to-guides/data-governance/automation/index.md @@ -264,4 +264,4 @@ Note that this automation, the ML Tagging, will be deprecated in future releases - **Propagate Metadata Thoughtfully**: When propagating metadata via lineage, make sure that the source metadata is correct before applying it across multiple datasets. - **Start with Controlled Propagation**: For complex and large lineage trees, begin the propagation with a limited propagation depth (e.g., 2-3 levels/depth) and gradually increase as needed to avoid unintended widespread changes. - **Understand Path-Aware Depth Behavior**: In complex lineage with multiple parent paths, remember that propagation depth is calculated separately for each path from each root entity. This ensures precise control over which upstream sources contribute metadata to downstream assets. -- **Set Up Stop Conditions for Critical Data**: Cofigure strategic stop conditions around critical ownership boundaries or sensitive data boundaries (Tags- PII, Confidential) to prevent accidental metadata overwrites. +- **Set Up Stop Conditions for Critical Data**: Configure strategic stop conditions around critical ownership boundaries or sensitive data boundaries (Tags- PII, Confidential) to prevent accidental metadata overwrites. diff --git a/content/v1.9.x/how-to-guides/data-governance/workflows/elements/triggers/periodic-batch-entity-trigger.md b/content/v1.9.x/how-to-guides/data-governance/workflows/elements/triggers/periodic-batch-entity-trigger.md index e8404753a..99b46a2d7 100644 --- a/content/v1.9.x/how-to-guides/data-governance/workflows/elements/triggers/periodic-batch-entity-trigger.md +++ b/content/v1.9.x/how-to-guides/data-governance/workflows/elements/triggers/periodic-batch-entity-trigger.md @@ -7,7 +7,7 @@ collate: true # Governance Workflows - Periodic Batch Entity Trigger -The **Periodic Batch Entity Trigger** enables actions to be triggered on a periodic schedule, processing a batch of entitites at a time. +The **Periodic Batch Entity Trigger** enables actions to be triggered on a periodic schedule, processing a batch of entities at a time. This type of trigger is useful for automating regular workflows that need to run on a schedule. ## Configuration diff --git a/content/v1.9.x/how-to-guides/data-insights/custom-dashboard.md b/content/v1.9.x/how-to-guides/data-insights/custom-dashboard.md index baa9c247a..fc5943a4d 100644 --- a/content/v1.9.x/how-to-guides/data-insights/custom-dashboard.md +++ b/content/v1.9.x/how-to-guides/data-insights/custom-dashboard.md @@ -98,7 +98,7 @@ You can choose the field name from the fields dropdown from the chart by functio Remember, spaces in field names are replaced with a period (.), and the key should be in lower camel case (meaning the first word is lowercase, and each subsequent word starts with an uppercase letter). -**Query**: The query is to filter down the result to plot, use the format `q=': '`. 
The `q=` part signifies a query, adn the condition goes inside the single quotes. +**Query**: The query is to filter down the result to plot, use the format `q=': '`. The `q=` part signifies a query, and the condition goes inside the single quotes. The value can be a literal value or it can be `*` to signify that any value exists for the give field. diff --git a/content/v1.9.x/how-to-guides/data-quality-observability/observability/alerts.md b/content/v1.9.x/how-to-guides/data-quality-observability/observability/alerts.md index 0fd8ffa8b..f443654f5 100644 --- a/content/v1.9.x/how-to-guides/data-quality-observability/observability/alerts.md +++ b/content/v1.9.x/how-to-guides/data-quality-observability/observability/alerts.md @@ -33,7 +33,7 @@ The first will be to select a source. For data quality you have 2 relevant optio /%} -### Step 2 - Select a Filtering Conditon (optional) +### Step 2 - Select a Filtering Condition (optional) **Note:** if you do not set any filter the alert will apply to all test cases or test suite. You can filter alerts based on specific condition to narrow down which test suite/test case should trigger an alert. This is interesting for user to dispatch alerts to different channels/users. @@ -44,7 +44,7 @@ You can filter alerts based on specific condition to narrow down which test suit caption="Alerts Menu" /%} -### Step 3 - Select a Triggering Conditon +### Step 3 - Select a Triggering Condition Trigger section will allow you set the condition for which an alert should be triggered {% image diff --git a/content/v1.9.x/how-to-guides/data-quality-observability/profiler/profiler-workflow.md b/content/v1.9.x/how-to-guides/data-quality-observability/profiler/profiler-workflow.md index 90816bb28..55e514d2f 100644 --- a/content/v1.9.x/how-to-guides/data-quality-observability/profiler/profiler-workflow.md +++ b/content/v1.9.x/how-to-guides/data-quality-observability/profiler/profiler-workflow.md @@ -93,7 +93,7 @@ Set the number of rows to ingest when Ingest Sample Data toggle is on. Defaults Number of thread to use when computing metrics for the profiler. For Snowflake users we recommend setting it to 1. There is a known issue with one of the dependency (`snowflake-connector-python`) affecting projects with certain environments. **Timeout in Seconds (Optional)** -This will set the duration a profiling job against a table should wait before interrupting its execution and moving on to profiling the next table. It is important to note that the profiler will wait for the hanging query to terminiate before killing the execution. If there is a risk for your profiling job to hang, it is important to also set a query/connection timeout on your database engine. The default value for the profiler timeout is 12-hours. +This will set the duration a profiling job against a table should wait before interrupting its execution and moving on to profiling the next table. It is important to note that the profiler will wait for the hanging query to terminate before killing the execution. If there is a risk for your profiling job to hang, it is important to also set a query/connection timeout on your database engine. The default value for the profiler timeout is 12-hours. ### 3. Schedule and Deploy After clicking Next, you will be redirected to the Scheduling form. This will be the same as the Metadata and Usage Ingestions. Select your desired schedule and click on Deploy to find the usage pipeline being added to the Service Ingestions. 
diff --git a/content/v1.9.x/how-to-guides/data-quality-observability/quality/tests-yaml.md b/content/v1.9.x/how-to-guides/data-quality-observability/quality/tests-yaml.md index 87c173caf..870cbd3fc 100644 --- a/content/v1.9.x/how-to-guides/data-quality-observability/quality/tests-yaml.md +++ b/content/v1.9.x/how-to-guides/data-quality-observability/quality/tests-yaml.md @@ -316,12 +316,12 @@ Integrity ``` ### Table Custom SQL Test -Write you own SQL test. When writting your query you can use 2 strategies: -- `ROWS` (default): expects the query to be written as `SELECT , FROM WHERE `. **Note** if your query returns a large amount of rows it might cause an "Out Of Memeory" error. In this case we recomend you to use the `COUNT` strategy. +Write your own SQL test. When writing your query you can use 2 strategies: +- `ROWS` (default): expects the query to be written as `SELECT , FROM WHERE `. **Note** if your query returns a large amount of rows it might cause an "Out Of Memory" error. In this case we recommend you to use the `COUNT` strategy. - `COUNT`: expects the query to be written as `SELECT COUNT() FROM WHERE `. **How to use the Threshold Parameter?** -The threshold allows you to define a limit for which you test should pass or fail - by defaut this number is 0. For example if my custom SQL query test returns 10 rows (or a COUNT value of 10) and my threshold is 5 the test will fail. If I update my threshold to 11 on my next run my test will pass. +The threshold allows you to define a limit for which your test should pass or fail - by default this number is 0. For example if my custom SQL query test returns 10 rows (or a COUNT value of 10) and my threshold is 5 the test will fail. If I update my threshold to 11 on my next run my test will pass. {% note %} @@ -571,7 +571,7 @@ Accuracy **Properties** -* `column`: the colummn that will be used to chech the table freshness +* `column`: the column that will be used to check the table freshness * `timeSinceUpdate`: (in seconds) The data is expected to be updated within this number of seconds. If the time since the last update is greater than this value, the test will fail. **Behavior** @@ -1520,7 +1520,7 @@ Accuracy ### Column Values To Be At Expected Location Validate the reference value for a column is a the expected geographic location -> Data will be temporarely stored in memory while the test case is running to validate the location. Not data will be permanently stored. +> Data will be temporarily stored in memory while the test case is running to validate the location. No data will be permanently stored. > France is the only supported location at this time.
To add any additional location please reach out to the team in our slack support channel **Dimension**: @@ -1528,7 +1528,7 @@ Accuracy **Properties** -* `locationReferenceType`: the type of location refernce `CITY` or `POSTAL_CODE` +* `locationReferenceType`: the type of location reference `CITY` or `POSTAL_CODE` * `longitudeColumnName`: longitude column name * `latitudeColumnName`: latitude column name * `radius`: radius in meter from which the location can be from the expected lat/long -- acts as a buffer diff --git a/content/v1.9.x/main-concepts/metadata-standard/schemas/email/templateValidationReponse.md b/content/v1.9.x/main-concepts/metadata-standard/schemas/email/templateValidationReponse.md index c886e30f8..12696a47f 100644 --- a/content/v1.9.x/main-concepts/metadata-standard/schemas/email/templateValidationReponse.md +++ b/content/v1.9.x/main-concepts/metadata-standard/schemas/email/templateValidationReponse.md @@ -4,7 +4,7 @@ description: Connect Templatevalidationreponse to enable streamlined access, mon slug: /main-concepts/metadata-standard/schemas/email/templatevalidationreponse --- -# Email Template Validation Reponse +# Email Template Validation Response *Schema defining email templates.* diff --git a/content/v1.9.x/main-concepts/metadata-standard/schemas/entity/feed/customProperty.md b/content/v1.9.x/main-concepts/metadata-standard/schemas/entity/feed/customProperty.md index 6a41ecc58..88dc5d971 100644 --- a/content/v1.9.x/main-concepts/metadata-standard/schemas/entity/feed/customProperty.md +++ b/content/v1.9.x/main-concepts/metadata-standard/schemas/entity/feed/customProperty.md @@ -6,7 +6,7 @@ slug: /main-concepts/metadata-standard/schemas/entity/feed/customproperty # CustomPropertyFeedInfo -*This schema defines the custom properties addition/deltion schema on feed.* +*This schema defines the custom properties addition/deletion schema on feed.* ## Properties diff --git a/content/v1.9.x/main-concepts/metadata-standard/schemas/entity/services/connections/dashboard/qlikSenseConnection.md b/content/v1.9.x/main-concepts/metadata-standard/schemas/entity/services/connections/dashboard/qlikSenseConnection.md index e61febc2c..2fcac4a26 100644 --- a/content/v1.9.x/main-concepts/metadata-standard/schemas/entity/services/connections/dashboard/qlikSenseConnection.md +++ b/content/v1.9.x/main-concepts/metadata-standard/schemas/entity/services/connections/dashboard/qlikSenseConnection.md @@ -11,7 +11,7 @@ slug: /main-concepts/metadata-standard/schemas/entity/services/connections/dashb ## Properties - **`type`**: Service Type. Refer to *[#/definitions/qlikSenseType](#definitions/qlikSenseType)*. Default: `"QlikSense"`. -- **`displayUrl`** *(string, format: uri)*: Qlik Sense Base URL, used for genrating dashboard & chat url. +- **`displayUrl`** *(string, format: uri)*: Qlik Sense Base URL, used for generating dashboard & chat url. - **`hostPort`** *(string, format: uri)*: URL for the Qlik instance. 
- **`certificates`** - **One of** diff --git a/content/v1.9.x/main-concepts/metadata-standard/schemas/metadataIngestion/databaseServiceAutoClassificationPipeline.md b/content/v1.9.x/main-concepts/metadata-standard/schemas/metadataIngestion/databaseServiceAutoClassificationPipeline.md index 6507521cc..ac320a54d 100644 --- a/content/v1.9.x/main-concepts/metadata-standard/schemas/metadataIngestion/databaseServiceAutoClassificationPipeline.md +++ b/content/v1.9.x/main-concepts/metadata-standard/schemas/metadataIngestion/databaseServiceAutoClassificationPipeline.md @@ -11,7 +11,7 @@ slug: /main-concepts/metadata-standard/schemas/metadataingestion/databaseservice ## Properties - **`type`**: Pipeline type. Refer to *[#/definitions/autoClassificationConfigType](#definitions/autoClassificationConfigType)*. Default: `"AutoClassification"`. -- **`classificationFilterPattern`**: Regex to only compute metrics for table that matches the given tag, tiers, gloassary pattern. Refer to *[../type/filterPattern.json#/definitions/filterPattern](#/type/filterPattern.json#/definitions/filterPattern)*. +- **`classificationFilterPattern`**: Regex to only compute metrics for table that matches the given tag, tiers, glossary pattern. Refer to *[../type/filterPattern.json#/definitions/filterPattern](#/type/filterPattern.json#/definitions/filterPattern)*. - **`schemaFilterPattern`**: Regex to only fetch tables or databases that matches the pattern. Refer to *[../type/filterPattern.json#/definitions/filterPattern](#/type/filterPattern.json#/definitions/filterPattern)*. - **`tableFilterPattern`**: Regex exclude tables or databases that matches the pattern. Refer to *[../type/filterPattern.json#/definitions/filterPattern](#/type/filterPattern.json#/definitions/filterPattern)*. - **`databaseFilterPattern`**: Regex to only fetch databases that matches the pattern. Refer to *[../type/filterPattern.json#/definitions/filterPattern](#/type/filterPattern.json#/definitions/filterPattern)*. diff --git a/content/v1.9.x/main-concepts/metadata-standard/schemas/metadataIngestion/databaseServiceProfilerPipeline.md b/content/v1.9.x/main-concepts/metadata-standard/schemas/metadataIngestion/databaseServiceProfilerPipeline.md index 6186f61aa..b48d3039c 100644 --- a/content/v1.9.x/main-concepts/metadata-standard/schemas/metadataIngestion/databaseServiceProfilerPipeline.md +++ b/content/v1.9.x/main-concepts/metadata-standard/schemas/metadataIngestion/databaseServiceProfilerPipeline.md @@ -11,7 +11,7 @@ slug: /main-concepts/metadata-standard/schemas/metadataingestion/databaseservice ## Properties - **`type`**: Pipeline type. Refer to *[#/definitions/profilerConfigType](#definitions/profilerConfigType)*. Default: `"Profiler"`. -- **`classificationFilterPattern`**: Regex to only compute metrics for table that matches the given tag, tiers, gloassary pattern. Refer to *[../type/filterPattern.json#/definitions/filterPattern](#/type/filterPattern.json#/definitions/filterPattern)*. +- **`classificationFilterPattern`**: Regex to only compute metrics for table that matches the given tag, tiers, glossary pattern. Refer to *[../type/filterPattern.json#/definitions/filterPattern](#/type/filterPattern.json#/definitions/filterPattern)*. - **`schemaFilterPattern`**: Regex to only fetch tables or databases that matches the pattern. Refer to *[../type/filterPattern.json#/definitions/filterPattern](#/type/filterPattern.json#/definitions/filterPattern)*. - **`tableFilterPattern`**: Regex exclude tables or databases that matches the pattern. 
Refer to *[../type/filterPattern.json#/definitions/filterPattern](#/type/filterPattern.json#/definitions/filterPattern)*. - **`databaseFilterPattern`**: Regex to only fetch databases that matches the pattern. Refer to *[../type/filterPattern.json#/definitions/filterPattern](#/type/filterPattern.json#/definitions/filterPattern)*. diff --git a/content/v1.9.x/sdk/go/index.md b/content/v1.9.x/sdk/go/index.md index 0499ca32e..cb8db2bec 100644 --- a/content/v1.9.x/sdk/go/index.md +++ b/content/v1.9.x/sdk/go/index.md @@ -24,7 +24,7 @@ To create OpenMetadata Gateway, you will need to establish a connection with *Op * `BaseURL`: The url on which your instance of OpenMetadata is up and running (include the port if you need to e.g. http://localhost:8585). * `APIVersion`: pass an empty string -- this will be `v1` for now. * `Retry`: number of time the request should retry if the status code returned is in `RetryCodes`. Use `0` to use the default value -* `RetryWait`: number of second to wait betwee retries. Pass 0 to use the default value +* `RetryWait`: number of second to wait between retries. Pass 0 to use the default value * `RetryCodes`: HTTP status that will trigger a retry. Pass `nil` to use the default * `AuthTokenMode`: defaults to `Bearer` * `AccessToken`: JWT token use to authenticate the request From 1b1ceecc9104b89485e516838c0a1d17aee0ebe9 Mon Sep 17 00:00:00 2001 From: "copilot-swe-agent[bot]" <198982749+Copilot@users.noreply.github.com> Date: Wed, 12 Nov 2025 13:04:02 +0000 Subject: [PATCH 4/4] Fix additional typos in documentation and constants Co-authored-by: ayush-shah <40225091+ayush-shah@users.noreply.github.com> --- constants/Roadmap.constants.ts | 8 ++++---- .../connectors/dashboard/domo-dashboard/yaml.md | 2 +- content/v1.10.x/connectors/database/cassandra/index.md | 2 +- content/v1.10.x/connectors/database/cassandra/yaml.md | 2 +- .../v1.10.x/connectors/database/domo-database/yaml.md | 2 +- content/v1.10.x/connectors/database/mongodb/index.md | 2 +- content/v1.10.x/connectors/database/mongodb/yaml.md | 2 +- content/v1.10.x/connectors/database/redshift/index.md | 2 +- .../connectors/database/redshift/troubleshooting.md | 2 +- content/v1.10.x/connectors/database/redshift/yaml.md | 2 +- .../workflows/metadata/incremental-extraction/index.md | 2 +- .../v1.10.x/connectors/pipeline/domo-pipeline/yaml.md | 2 +- .../v1.10.x/connectors/pipeline/openlineage/index.md | 6 +++--- content/v1.10.x/connectors/troubleshoot/index.md | 10 +++++----- content/v1.10.x/deployment/kubernetes/aks.md | 2 +- .../developing-a-new-connector/define-json-schema.md | 2 +- .../data-quality-observability/quality/tests-yaml.md | 2 +- .../schemas/email/templateValidationReponse.md | 2 +- .../schemas/entity/feed/customProperty.md | 2 +- .../connections/dashboard/qlikSenseConnection.md | 2 +- .../databaseServiceAutoClassificationPipeline.md | 2 +- .../databaseServiceProfilerPipeline.md | 2 +- content/v1.10.x/sdk/go/index.md | 2 +- .../workflows/metadata/incremental-extraction/index.md | 2 +- .../connectors/pipeline/openlineage/index.md | 6 +++--- content/v1.11.x-SNAPSHOT/deployment/kubernetes/aks.md | 2 +- .../developing-a-new-connector/define-json-schema.md | 2 +- .../data-quality-observability/quality/tests-yaml.md | 2 +- .../workflows/metadata/incremental-extraction/index.md | 2 +- .../v1.9.x/connectors/pipeline/openlineage/index.md | 6 +++--- content/v1.9.x/deployment/kubernetes/aks.md | 2 +- .../developing-a-new-connector/define-json-schema.md | 2 +- .../data-quality-observability/quality/tests-yaml.md | 2 +- 
33 files changed, 46 insertions(+), 46 deletions(-) diff --git a/constants/Roadmap.constants.ts b/constants/Roadmap.constants.ts index 5d4496141..cc6d499be 100644 --- a/constants/Roadmap.constants.ts +++ b/constants/Roadmap.constants.ts @@ -270,7 +270,7 @@ export const ROADMAP_DATA: RoadmapData = { { label: "Glossary Terms Relations", description: - "Users can define relationships and associate realtionship between entities and terms. ", + "Users can define relationships and associate relationship between entities and terms. ", }, ], release: 1.5, @@ -333,7 +333,7 @@ export const ROADMAP_DATA: RoadmapData = { { label: "Data Asset Naming Conventions", description: - "Define Data Asset naming convetions and use this app to enforce and send a report to teams.", + "Define Data Asset naming conventions and use this app to enforce and send a report to teams.", }, { label: "Reverse Metadata Applications", @@ -395,7 +395,7 @@ export const ROADMAP_DATA: RoadmapData = { { label: "Pipeline Dashboard", description: - "Pipeline Dashboard - Show all ETL pipelines in an organization and how many succesful/failed/aborted/paused.", + "Pipeline Dashboard - Show all ETL pipelines in an organization and how many successful/failed/aborted/paused.", }, ], release: 1.7, @@ -450,7 +450,7 @@ export const ROADMAP_DATA: RoadmapData = { { label: "Pipeline Dashboard", description: - "Pipeline Dashboard - Show all ETL pipelines in an organization and how many succesful/failed/aborted/paused.", + "Pipeline Dashboard - Show all ETL pipelines in an organization and how many successful/failed/aborted/paused.", }, ], release: 1.8, diff --git a/content/v1.10.x/connectors/dashboard/domo-dashboard/yaml.md b/content/v1.10.x/connectors/dashboard/domo-dashboard/yaml.md index 6e3ee2c92..ea5f5ec28 100644 --- a/content/v1.10.x/connectors/dashboard/domo-dashboard/yaml.md +++ b/content/v1.10.x/connectors/dashboard/domo-dashboard/yaml.md @@ -23,7 +23,7 @@ Configure and schedule DomoDashboard metadata and profiler workflows from the Op ## Requirements -**Note:** For metadata ingestion, kindly make sure add atleast `dashboard` scopes to the clientId provided. +**Note:** For metadata ingestion, kindly make sure add at least `dashboard` scopes to the clientId provided. Question related to scopes, click [here](https://developer.domo.com/portal/1845fc11bbe5d-api-authentication). ### Python Requirements diff --git a/content/v1.10.x/connectors/database/cassandra/index.md b/content/v1.10.x/connectors/database/cassandra/index.md index bbd156989..0c7bb8c64 100644 --- a/content/v1.10.x/connectors/database/cassandra/index.md +++ b/content/v1.10.x/connectors/database/cassandra/index.md @@ -48,7 +48,7 @@ To extract metadata using the Cassandra connector, ensure the user in the connec #### Connection Details - **Username**: Username to connect to Cassandra. This user must have the necessary permissions to perform metadata extraction and table queries. -- **Host Port**: When using the `cassandra` connecion schema, the hostPort parameter specifies the host and port of the Cassandra. This should be specified as a string in the format `hostname:port`. E.g., `localhost:9042`.- **databaseName**: Optional name to give to the database in OpenMetadata. If left blank, we will use default as the database name. +- **Host Port**: When using the `cassandra` connection schema, the hostPort parameter specifies the host and port of the Cassandra. This should be specified as a string in the format `hostname:port`. 
E.g., `localhost:9042`.- **databaseName**: Optional name to give to the database in OpenMetadata. If left blank, we will use default as the database name. **Auth Type**: Following authentication types are supported: 1. **Basic Authentication**: diff --git a/content/v1.10.x/connectors/database/cassandra/yaml.md b/content/v1.10.x/connectors/database/cassandra/yaml.md index 1ea7fb056..00c0c13ad 100644 --- a/content/v1.10.x/connectors/database/cassandra/yaml.md +++ b/content/v1.10.x/connectors/database/cassandra/yaml.md @@ -73,7 +73,7 @@ This is a sample config for Cassandra: {% codeInfo srNumber=3 %} -**hostPort**: When using the `cassandra` connecion schema, the hostPort parameter specifies the host and port of the Cassandra. This should be specified as a string in the format `hostname:port`. E.g., `localhost:9042`. +**hostPort**: When using the `cassandra` connection schema, the hostPort parameter specifies the host and port of the Cassandra. This should be specified as a string in the format `hostname:port`. E.g., `localhost:9042`. {% /codeInfo %} diff --git a/content/v1.10.x/connectors/database/domo-database/yaml.md b/content/v1.10.x/connectors/database/domo-database/yaml.md index 7de7375a5..7df65bc7a 100644 --- a/content/v1.10.x/connectors/database/domo-database/yaml.md +++ b/content/v1.10.x/connectors/database/domo-database/yaml.md @@ -26,7 +26,7 @@ Configure and schedule DomoDatabase metadata and profiler workflows from the Ope **Note:** -For metadata ingestion, kindly make sure add atleast `data` scopes to the clientId provided. +For metadata ingestion, kindly make sure add at least `data` scopes to the clientId provided. Question related to scopes, click [here](https://developer.domo.com/portal/1845fc11bbe5d-api-authentication). diff --git a/content/v1.10.x/connectors/database/mongodb/index.md b/content/v1.10.x/connectors/database/mongodb/index.md index 18d52387f..4f66e974c 100644 --- a/content/v1.10.x/connectors/database/mongodb/index.md +++ b/content/v1.10.x/connectors/database/mongodb/index.md @@ -65,7 +65,7 @@ For a complete guide on managing secrets in hybrid setups, see the [Hybrid Inges - **Username**: Username to connect to Mongodb. This user must have access to perform `find` operation on collection and `listCollection` operations on database available in MongoDB. - **Password**: Password to connect to MongoDB. -- **Host Port**: When using the `mongodb` connecion schema, the hostPort parameter specifies the host and port of the MongoDB. This should be specified as a string in the format `hostname:port`. E.g., `localhost:27017`. When using the `mongodb+srv` connection schema, the hostPort parameter specifies the host and port of the MongoDB. This should be specified as a string in the format `hostname`. E.g., `cluster0-abcde.mongodb.net`. +- **Host Port**: When using the `mongodb` connection schema, the hostPort parameter specifies the host and port of the MongoDB. This should be specified as a string in the format `hostname:port`. E.g., `localhost:27017`. When using the `mongodb+srv` connection schema, the hostPort parameter specifies the host and port of the MongoDB. This should be specified as a string in the format `hostname`. E.g., `cluster0-abcde.mongodb.net`. - **databaseName**: Optional name to give to the database in OpenMetadata. If left blank, we will use default as the database name. Using Atlas? Follow [this guide](https://www.mongodb.com/docs/guides/atlas/connection-string/) to get the connection string. 
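To make the `hostPort` guidance above concrete, here is a minimal, illustrative MongoDB service connection in the YAML shape used by the connector docs touched in this patch; the service name, credentials, and database name are placeholders rather than values taken from the patch:

```yaml
# Hypothetical metadata ingestion source for MongoDB using the standard `mongodb` scheme.
# With the `mongodb+srv` scheme, hostPort would instead be just the hostname,
# e.g. cluster0-abcde.mongodb.net, as described in the text above.
source:
  type: mongodb
  serviceName: local_mongodb              # placeholder service name
  serviceConnection:
    config:
      type: MongoDB
      username: openmetadata_user         # needs find/listCollection access
      password: openmetadata_password
      hostPort: localhost:27017           # hostname:port for the standard scheme
      databaseName: custom_database_name  # optional display name in OpenMetadata
  sourceConfig:
    config:
      type: DatabaseMetadata
```

The same `hostname:port` convention applies to the Cassandra `hostPort` field fixed earlier in this patch (e.g. `localhost:9042`).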
diff --git a/content/v1.10.x/connectors/database/mongodb/yaml.md b/content/v1.10.x/connectors/database/mongodb/yaml.md index 406cfc6a8..35de6d846 100644 --- a/content/v1.10.x/connectors/database/mongodb/yaml.md +++ b/content/v1.10.x/connectors/database/mongodb/yaml.md @@ -75,7 +75,7 @@ This is a sample config for MongoDB: {% codeInfo srNumber=3 %} -**hostPort**: When using the `mongodb` connecion schema, the hostPort parameter specifies the host and port of the MongoDB. This should be specified as a string in the format `hostname:port`. E.g., `localhost:27017`. When using the `mongodb+srv` connection schema, the hostPort parameter specifies the host and port of the MongoDB. This should be specified as a string in the format `hostname`. E.g., `cluster0-abcde.mongodb.net`. +**hostPort**: When using the `mongodb` connection schema, the hostPort parameter specifies the host and port of the MongoDB. This should be specified as a string in the format `hostname:port`. E.g., `localhost:27017`. When using the `mongodb+srv` connection schema, the hostPort parameter specifies the host and port of the MongoDB. This should be specified as a string in the format `hostname`. E.g., `cluster0-abcde.mongodb.net`. Using Atlas? Follow [this guide](https://www.mongodb.com/docs/guides/atlas/connection-string/) to get the connection string. diff --git a/content/v1.10.x/connectors/database/redshift/index.md b/content/v1.10.x/connectors/database/redshift/index.md index 1d64adece..3a368cea1 100644 --- a/content/v1.10.x/connectors/database/redshift/index.md +++ b/content/v1.10.x/connectors/database/redshift/index.md @@ -72,7 +72,7 @@ For the usage and lineage workflow, the user will need `SELECT` privilege on `ST } /%} -It is recommmended to exclude the schema "information_schema" from the metadata ingestion as it contains system tables and views. +It is recommended to exclude the schema "information_schema" from the metadata ingestion as it contains system tables and views. {% stepsContainer %} {% extraContent parentTagName="stepsContainer" %} diff --git a/content/v1.10.x/connectors/database/redshift/troubleshooting.md b/content/v1.10.x/connectors/database/redshift/troubleshooting.md index 87a8ecb31..d59e4083d 100644 --- a/content/v1.10.x/connectors/database/redshift/troubleshooting.md +++ b/content/v1.10.x/connectors/database/redshift/troubleshooting.md @@ -23,7 +23,7 @@ src="/images/v1.10/connectors/redshift/service-connection-arguments.png" alt="Configure service connection" caption="Configure the service connection by filling the form" /%} -### Metdata Ingestion Failure +### Metadata Ingestion Failure If your metadata ingesiton fails and you have errors like: diff --git a/content/v1.10.x/connectors/database/redshift/yaml.md b/content/v1.10.x/connectors/database/redshift/yaml.md index 40ad1f985..4a75c8a30 100644 --- a/content/v1.10.x/connectors/database/redshift/yaml.md +++ b/content/v1.10.x/connectors/database/redshift/yaml.md @@ -68,7 +68,7 @@ The workflow is modeled around the following **Note:** During the metadata ingestion for redshift, the tables in which the distribution style i.e `DISTSTYLE` is not `AUTO` will be marked as partitioned tables -It is recommmended to exclude the schema "information_schema" from the metadata ingestion as it contains system tables and views. +It is recommended to exclude the schema "information_schema" from the metadata ingestion as it contains system tables and views. ### 1. 
Define the YAML Config diff --git a/content/v1.10.x/connectors/ingestion/workflows/metadata/incremental-extraction/index.md b/content/v1.10.x/connectors/ingestion/workflows/metadata/incremental-extraction/index.md index f72c65662..821ede5de 100644 --- a/content/v1.10.x/connectors/ingestion/workflows/metadata/incremental-extraction/index.md +++ b/content/v1.10.x/connectors/ingestion/workflows/metadata/incremental-extraction/index.md @@ -14,7 +14,7 @@ The default Metadata Ingestion roughly follows these steps: While on one hand this is a great simple way of doing things that works for most use cases since at every ingestion pipeline run we get the whole Source state, on other hand this is fetching and comparing a lot of data without need since if there were no structural changes we already know there is nothing to update on OpenMetadata. -We implemented the Incremental Extraction feature to improve the performance by diminishing the extraction and comparison of uneeded data. +We implemented the Incremental Extraction feature to improve the performance by diminishing the extraction and comparison of unneeded data. How this is done depends a lot on the Source itself, but the general idea is to follow these steps: diff --git a/content/v1.10.x/connectors/pipeline/domo-pipeline/yaml.md b/content/v1.10.x/connectors/pipeline/domo-pipeline/yaml.md index 4d3906d55..31e17dab9 100644 --- a/content/v1.10.x/connectors/pipeline/domo-pipeline/yaml.md +++ b/content/v1.10.x/connectors/pipeline/domo-pipeline/yaml.md @@ -24,7 +24,7 @@ Configure and schedule Domo Pipeline metadata and profiler workflows from the Op ## Requirements -**Note:** For metadata ingestion, kindly make sure add atleast `data` scopes to the clientId provided. +**Note:** For metadata ingestion, kindly make sure to add at least the `data` scope to the clientId provided. Question related to scopes, click [here](https://developer.domo.com/portal/1845fc11bbe5d-api-authentication).
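As a sketch of where that `data` scope requirement comes into play, a Domo Pipeline service connection could look roughly like the following; the field names follow the Domo connector docs, but treat them and all values here as illustrative placeholders rather than part of this patch:

```yaml
# Hypothetical Domo Pipeline source; the clientId referenced in the note above
# must have at least the `data` scope granted in Domo's developer portal.
source:
  type: domopipeline
  serviceName: domo_pipeline_source       # placeholder service name
  serviceConnection:
    config:
      type: DomoPipeline
      clientId: <client-id>               # client with the `data` scope
      secretToken: <secret-token>
      accessToken: <access-token>
      apiHost: api.domo.com
      instanceDomain: https://<your-instance>.domo.com
  sourceConfig:
    config:
      type: PipelineMetadata
```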
### Python Requirements diff --git a/content/v1.10.x/connectors/pipeline/openlineage/index.md b/content/v1.10.x/connectors/pipeline/openlineage/index.md index 74e874feb..1c00712f9 100644 --- a/content/v1.10.x/connectors/pipeline/openlineage/index.md +++ b/content/v1.10.x/connectors/pipeline/openlineage/index.md @@ -134,7 +134,7 @@ openlineage_service_request = CreatePipelineServiceRequest( poolTimeout=3.0, sessionTimeout=60, securityProtocol=KafkaSecurityProtocol.SSL, - # below ssl confing in optional and used only when securityProtocol=KafkaSecurityProtocol.SSL + # below ssl config in optional and used only when securityProtocol=KafkaSecurityProtocol.SSL sslConfig=ValidateSslClientConfig( sslCertificate='/path/to/kafka/certs/Certificate.pem', sslKey='/path/to/kafka/certs/Key.pem', @@ -200,7 +200,7 @@ openlineage_service_request = CreatePipelineServiceRequest( poolTimeout=3.0, sessionTimeout=60, securityProtocol=KafkaSecurityProtocol.SSL, - # below ssl confing in optional and used only when securityProtocol=KafkaSecurityProtocol.SSL + # below ssl config in optional and used only when securityProtocol=KafkaSecurityProtocol.SSL sslConfig=ValidateSslClientConfig( sslCertificate='/path/to/kafka/certs/Certificate.pem', sslKey='/path/to/kafka/certs/Key.pem', @@ -264,7 +264,7 @@ openlineage_service_request = CreatePipelineServiceRequest( poolTimeout=3.0, sessionTimeout=60, securityProtocol=KafkaSecurityProtocol.SSL, - # below ssl confing in optional and used only when securityProtocol=KafkaSecurityProtocol.SSL + # below ssl config in optional and used only when securityProtocol=KafkaSecurityProtocol.SSL sslConfig=ValidateSslClientConfig( sslCertificate='/path/to/kafka/certs/Certificate.pem', sslKey='/path/to/kafka/certs/Key.pem', diff --git a/content/v1.10.x/connectors/troubleshoot/index.md b/content/v1.10.x/connectors/troubleshoot/index.md index c18d83829..7438f5848 100644 --- a/content/v1.10.x/connectors/troubleshoot/index.md +++ b/content/v1.10.x/connectors/troubleshoot/index.md @@ -177,8 +177,8 @@ In Connection details section page click on Create a personal access token. {% image src="/images/v1.10/connectors/databricks/Open-create-tocken-page.png" -alt="Open create tocken" -caption="Open create tocken" /%} +alt="Open create token" +caption="Open create token" /%} @@ -187,8 +187,8 @@ Now In this page you can create new `token`. {% image src="/images/v1.10/connectors/databricks/Generate-token.png" -alt="Generate tocken" -caption="Generate tocken" /%} +alt="Generate token" +caption="Generate token" /%} ## Domo Database @@ -395,7 +395,7 @@ src="/images/v1.10/connectors/redshift/service-connection-arguments.png" alt="Configure service connection" caption="Configure the service connection by filling the form" /%} -### Metdata Ingestion Failure +### Metadata Ingestion Failure If your metadata ingesiton fails and you have errors like: diff --git a/content/v1.10.x/deployment/kubernetes/aks.md b/content/v1.10.x/deployment/kubernetes/aks.md index 5abb73493..2c5265c3c 100644 --- a/content/v1.10.x/deployment/kubernetes/aks.md +++ b/content/v1.10.x/deployment/kubernetes/aks.md @@ -184,7 +184,7 @@ kubectl create secret generic postgresql-secret ``` ### Step 6 - Install Openmetadata dependencies -The values-dependencies-yaml is used to overwride default values in the official helm chart and must be configured for customizing for use cases. Uncomment the externalDatabase section with meaningful values to connect to external database for production deployments. 
We set sensitive information like host address, DB name and DB username through the CLI. +The values-dependencies-yaml is used to override default values in the official helm chart and must be configured for customizing for use cases. Uncomment the externalDatabase section with meaningful values to connect to external database for production deployments. We set sensitive information like host address, DB name and DB username through the CLI. ```yaml # values-dependencies.yaml diff --git a/content/v1.10.x/developers/contribute/developing-a-new-connector/define-json-schema.md b/content/v1.10.x/developers/contribute/developing-a-new-connector/define-json-schema.md index 8067613ee..1a5089530 100644 --- a/content/v1.10.x/developers/contribute/developing-a-new-connector/define-json-schema.md +++ b/content/v1.10.x/developers/contribute/developing-a-new-connector/define-json-schema.md @@ -124,7 +124,7 @@ On this connector we can see two different definitions: {% codeInfo srNumber=6 %} -* **additionalProperties**: To avoid werid behavior, we always prevent additionalProperties to be passed to the schema by setting this parameter to false. +* **additionalProperties**: To avoid weird behavior, we always prevent additionalProperties to be passed to the schema by setting this parameter to false. * **required**: Here we can define any properties that are always required or the schema would be invalid otherwise diff --git a/content/v1.10.x/how-to-guides/data-quality-observability/quality/tests-yaml.md b/content/v1.10.x/how-to-guides/data-quality-observability/quality/tests-yaml.md index 870cbd3fc..a004721c2 100644 --- a/content/v1.10.x/how-to-guides/data-quality-observability/quality/tests-yaml.md +++ b/content/v1.10.x/how-to-guides/data-quality-observability/quality/tests-yaml.md @@ -571,7 +571,7 @@ Accuracy **Properties** -* `column`: the column that will be used to chech the table freshness +* `column`: the column that will be used to check the table freshness * `timeSinceUpdate`: (in seconds) The data is expected to be updated within this number of seconds. If the time since the last update is greater than this value, the test will fail. 
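To ground the freshness `column` and `timeSinceUpdate` properties in the YAML form this guide uses, a hedged sketch of a test case entry might look like the block below; the test definition name and the surrounding `testCases` wrapper are assumptions for illustration and should be checked against the test definitions available in your OpenMetadata version:

```yaml
# Hypothetical freshness test case: fail if `updated_at` is older than one day.
testCases:
  - name: orders_updated_within_a_day     # placeholder test case name
    testDefinitionName: tableFreshness    # assumed definition name; verify in your version
    parameterValues:
      - name: column
        value: updated_at                 # column used to check the table freshness
      - name: timeSinceUpdate
        value: 86400                      # 24 hours, expressed in seconds
```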
**Behavior** diff --git a/content/v1.10.x/main-concepts/metadata-standard/schemas/email/templateValidationReponse.md b/content/v1.10.x/main-concepts/metadata-standard/schemas/email/templateValidationReponse.md index b752fae19..3d580eb19 100644 --- a/content/v1.10.x/main-concepts/metadata-standard/schemas/email/templateValidationReponse.md +++ b/content/v1.10.x/main-concepts/metadata-standard/schemas/email/templateValidationReponse.md @@ -3,7 +3,7 @@ title: templateValidationReponse slug: /main-concepts/metadata-standard/schemas/email/templatevalidationreponse --- -# Email Template Validation Reponse +# Email Template Validation Response *Schema defining email templates.* diff --git a/content/v1.10.x/main-concepts/metadata-standard/schemas/entity/feed/customProperty.md b/content/v1.10.x/main-concepts/metadata-standard/schemas/entity/feed/customProperty.md index 1e43e63e7..7a297ba89 100644 --- a/content/v1.10.x/main-concepts/metadata-standard/schemas/entity/feed/customProperty.md +++ b/content/v1.10.x/main-concepts/metadata-standard/schemas/entity/feed/customProperty.md @@ -5,7 +5,7 @@ slug: /main-concepts/metadata-standard/schemas/entity/feed/customproperty # CustomPropertyFeedInfo -*This schema defines the custom properties addition/deltion schema on feed.* +*This schema defines the custom properties addition/deletion schema on feed.* ## Properties diff --git a/content/v1.10.x/main-concepts/metadata-standard/schemas/entity/services/connections/dashboard/qlikSenseConnection.md b/content/v1.10.x/main-concepts/metadata-standard/schemas/entity/services/connections/dashboard/qlikSenseConnection.md index a23d99b6c..4a05429d1 100644 --- a/content/v1.10.x/main-concepts/metadata-standard/schemas/entity/services/connections/dashboard/qlikSenseConnection.md +++ b/content/v1.10.x/main-concepts/metadata-standard/schemas/entity/services/connections/dashboard/qlikSenseConnection.md @@ -10,7 +10,7 @@ slug: /main-concepts/metadata-standard/schemas/entity/services/connections/dashb ## Properties - **`type`**: Service Type. Refer to *#/definitions/qlikSenseType*. Default: `QlikSense`. -- **`displayUrl`** *(string)*: Qlik Sense Base URL, used for genrating dashboard & chat url. +- **`displayUrl`** *(string)*: Qlik Sense Base URL, used for generating dashboard & chat url. - **`hostPort`** *(string)*: URL for the Qlik instance. - **`certificates`** - **`validateHostName`** *(boolean)*: Validate Host Name. Default: `False`. diff --git a/content/v1.10.x/main-concepts/metadata-standard/schemas/metadataIngestion/databaseServiceAutoClassificationPipeline.md b/content/v1.10.x/main-concepts/metadata-standard/schemas/metadataIngestion/databaseServiceAutoClassificationPipeline.md index 8e6c5ca1d..960a9f3ed 100644 --- a/content/v1.10.x/main-concepts/metadata-standard/schemas/metadataIngestion/databaseServiceAutoClassificationPipeline.md +++ b/content/v1.10.x/main-concepts/metadata-standard/schemas/metadataIngestion/databaseServiceAutoClassificationPipeline.md @@ -10,7 +10,7 @@ slug: /main-concepts/metadata-standard/schemas/metadataingestion/databaseservice ## Properties - **`type`**: Pipeline type. Refer to *#/definitions/autoClassificationConfigType*. Default: `AutoClassification`. -- **`classificationFilterPattern`**: Regex to only compute metrics for table that matches the given tag, tiers, gloassary pattern. Refer to *../type/filterPattern.json#/definitions/filterPattern*. +- **`classificationFilterPattern`**: Regex to only compute metrics for table that matches the given tag, tiers, glossary pattern. 
Refer to *../type/filterPattern.json#/definitions/filterPattern*. - **`schemaFilterPattern`**: Regex to only fetch tables or databases that matches the pattern. Refer to *../type/filterPattern.json#/definitions/filterPattern*. - **`tableFilterPattern`**: Regex exclude tables or databases that matches the pattern. Refer to *../type/filterPattern.json#/definitions/filterPattern*. - **`databaseFilterPattern`**: Regex to only fetch databases that matches the pattern. Refer to *../type/filterPattern.json#/definitions/filterPattern*. diff --git a/content/v1.10.x/main-concepts/metadata-standard/schemas/metadataIngestion/databaseServiceProfilerPipeline.md b/content/v1.10.x/main-concepts/metadata-standard/schemas/metadataIngestion/databaseServiceProfilerPipeline.md index 1cc51f5da..ded755a26 100644 --- a/content/v1.10.x/main-concepts/metadata-standard/schemas/metadataIngestion/databaseServiceProfilerPipeline.md +++ b/content/v1.10.x/main-concepts/metadata-standard/schemas/metadataIngestion/databaseServiceProfilerPipeline.md @@ -11,7 +11,7 @@ slug: /main-concepts/metadata-standard/schemas/metadataingestion/databaseservice - **`type`**: Pipeline type. Refer to *#/definitions/profilerConfigType*. Default: `Profiler`. - **`processingEngine`**: Refer to *#/definitions/processingEngine*. -- **`classificationFilterPattern`**: Regex to only compute metrics for table that matches the given tag, tiers, gloassary pattern. Refer to *../type/filterPattern.json#/definitions/filterPattern*. +- **`classificationFilterPattern`**: Regex to only compute metrics for table that matches the given tag, tiers, glossary pattern. Refer to *../type/filterPattern.json#/definitions/filterPattern*. - **`schemaFilterPattern`**: Regex to only fetch tables or databases that matches the pattern. Refer to *../type/filterPattern.json#/definitions/filterPattern*. - **`tableFilterPattern`**: Regex exclude tables or databases that matches the pattern. Refer to *../type/filterPattern.json#/definitions/filterPattern*. - **`databaseFilterPattern`**: Regex to only fetch databases that matches the pattern. Refer to *../type/filterPattern.json#/definitions/filterPattern*. diff --git a/content/v1.10.x/sdk/go/index.md b/content/v1.10.x/sdk/go/index.md index 0499ca32e..cb8db2bec 100644 --- a/content/v1.10.x/sdk/go/index.md +++ b/content/v1.10.x/sdk/go/index.md @@ -24,7 +24,7 @@ To create OpenMetadata Gateway, you will need to establish a connection with *Op * `BaseURL`: The url on which your instance of OpenMetadata is up and running (include the port if you need to e.g. http://localhost:8585). * `APIVersion`: pass an empty string -- this will be `v1` for now. * `Retry`: number of time the request should retry if the status code returned is in `RetryCodes`. Use `0` to use the default value -* `RetryWait`: number of second to wait betwee retries. Pass 0 to use the default value +* `RetryWait`: number of second to wait between retries. Pass 0 to use the default value * `RetryCodes`: HTTP status that will trigger a retry. 
Pass `nil` to use the default * `AuthTokenMode`: defaults to `Bearer` * `AccessToken`: JWT token use to authenticate the request diff --git a/content/v1.11.x-SNAPSHOT/connectors/ingestion/workflows/metadata/incremental-extraction/index.md b/content/v1.11.x-SNAPSHOT/connectors/ingestion/workflows/metadata/incremental-extraction/index.md index f72c65662..821ede5de 100644 --- a/content/v1.11.x-SNAPSHOT/connectors/ingestion/workflows/metadata/incremental-extraction/index.md +++ b/content/v1.11.x-SNAPSHOT/connectors/ingestion/workflows/metadata/incremental-extraction/index.md @@ -14,7 +14,7 @@ The default Metadata Ingestion roughly follows these steps: While on one hand this is a great simple way of doing things that works for most use cases since at every ingestion pipeline run we get the whole Source state, on other hand this is fetching and comparing a lot of data without need since if there were no structural changes we already know there is nothing to update on OpenMetadata. -We implemented the Incremental Extraction feature to improve the performance by diminishing the extraction and comparison of uneeded data. +We implemented the Incremental Extraction feature to improve the performance by diminishing the extraction and comparison of unneeded data. How this is done depends a lot on the Source itself, but the general idea is to follow these steps: diff --git a/content/v1.11.x-SNAPSHOT/connectors/pipeline/openlineage/index.md b/content/v1.11.x-SNAPSHOT/connectors/pipeline/openlineage/index.md index 727130a9e..c562d1b64 100644 --- a/content/v1.11.x-SNAPSHOT/connectors/pipeline/openlineage/index.md +++ b/content/v1.11.x-SNAPSHOT/connectors/pipeline/openlineage/index.md @@ -134,7 +134,7 @@ openlineage_service_request = CreatePipelineServiceRequest( poolTimeout=3.0, sessionTimeout=60, securityProtocol=KafkaSecurityProtocol.SSL, - # below ssl confing in optional and used only when securityProtocol=KafkaSecurityProtocol.SSL + # below ssl config in optional and used only when securityProtocol=KafkaSecurityProtocol.SSL sslConfig=ValidateSslClientConfig( sslCertificate='/path/to/kafka/certs/Certificate.pem', sslKey='/path/to/kafka/certs/Key.pem', @@ -200,7 +200,7 @@ openlineage_service_request = CreatePipelineServiceRequest( poolTimeout=3.0, sessionTimeout=60, securityProtocol=KafkaSecurityProtocol.SSL, - # below ssl confing in optional and used only when securityProtocol=KafkaSecurityProtocol.SSL + # below ssl config in optional and used only when securityProtocol=KafkaSecurityProtocol.SSL sslConfig=ValidateSslClientConfig( sslCertificate='/path/to/kafka/certs/Certificate.pem', sslKey='/path/to/kafka/certs/Key.pem', @@ -264,7 +264,7 @@ openlineage_service_request = CreatePipelineServiceRequest( poolTimeout=3.0, sessionTimeout=60, securityProtocol=KafkaSecurityProtocol.SSL, - # below ssl confing in optional and used only when securityProtocol=KafkaSecurityProtocol.SSL + # below ssl config in optional and used only when securityProtocol=KafkaSecurityProtocol.SSL sslConfig=ValidateSslClientConfig( sslCertificate='/path/to/kafka/certs/Certificate.pem', sslKey='/path/to/kafka/certs/Key.pem', diff --git a/content/v1.11.x-SNAPSHOT/deployment/kubernetes/aks.md b/content/v1.11.x-SNAPSHOT/deployment/kubernetes/aks.md index 8624337a6..6b787ac44 100644 --- a/content/v1.11.x-SNAPSHOT/deployment/kubernetes/aks.md +++ b/content/v1.11.x-SNAPSHOT/deployment/kubernetes/aks.md @@ -184,7 +184,7 @@ kubectl create secret generic postgresql-secret ``` ### Step 6 - Install Openmetadata dependencies -The 
values-dependencies-yaml is used to overwride default values in the official helm chart and must be configured for customizing for use cases. Uncomment the externalDatabase section with meaningful values to connect to external database for production deployments. We set sensitive information like host address, DB name and DB username through the CLI. +The values-dependencies-yaml is used to override default values in the official helm chart and must be configured for customizing for use cases. Uncomment the externalDatabase section with meaningful values to connect to external database for production deployments. We set sensitive information like host address, DB name and DB username through the CLI. ```yaml # values-dependencies.yaml diff --git a/content/v1.11.x-SNAPSHOT/developers/contribute/developing-a-new-connector/define-json-schema.md b/content/v1.11.x-SNAPSHOT/developers/contribute/developing-a-new-connector/define-json-schema.md index 8067613ee..1a5089530 100644 --- a/content/v1.11.x-SNAPSHOT/developers/contribute/developing-a-new-connector/define-json-schema.md +++ b/content/v1.11.x-SNAPSHOT/developers/contribute/developing-a-new-connector/define-json-schema.md @@ -124,7 +124,7 @@ On this connector we can see two different definitions: {% codeInfo srNumber=6 %} -* **additionalProperties**: To avoid werid behavior, we always prevent additionalProperties to be passed to the schema by setting this parameter to false. +* **additionalProperties**: To avoid weird behavior, we always prevent additionalProperties to be passed to the schema by setting this parameter to false. * **required**: Here we can define any properties that are always required or the schema would be invalid otherwise diff --git a/content/v1.11.x-SNAPSHOT/how-to-guides/data-quality-observability/quality/tests-yaml.md b/content/v1.11.x-SNAPSHOT/how-to-guides/data-quality-observability/quality/tests-yaml.md index 870cbd3fc..a004721c2 100644 --- a/content/v1.11.x-SNAPSHOT/how-to-guides/data-quality-observability/quality/tests-yaml.md +++ b/content/v1.11.x-SNAPSHOT/how-to-guides/data-quality-observability/quality/tests-yaml.md @@ -571,7 +571,7 @@ Accuracy **Properties** -* `column`: the column that will be used to chech the table freshness +* `column`: the column that will be used to check the table freshness * `timeSinceUpdate`: (in seconds) The data is expected to be updated within this number of seconds. If the time since the last update is greater than this value, the test will fail. **Behavior** diff --git a/content/v1.9.x/connectors/ingestion/workflows/metadata/incremental-extraction/index.md b/content/v1.9.x/connectors/ingestion/workflows/metadata/incremental-extraction/index.md index f72c65662..821ede5de 100644 --- a/content/v1.9.x/connectors/ingestion/workflows/metadata/incremental-extraction/index.md +++ b/content/v1.9.x/connectors/ingestion/workflows/metadata/incremental-extraction/index.md @@ -14,7 +14,7 @@ The default Metadata Ingestion roughly follows these steps: While on one hand this is a great simple way of doing things that works for most use cases since at every ingestion pipeline run we get the whole Source state, on other hand this is fetching and comparing a lot of data without need since if there were no structural changes we already know there is nothing to update on OpenMetadata. -We implemented the Incremental Extraction feature to improve the performance by diminishing the extraction and comparison of uneeded data. 
+We implemented the Incremental Extraction feature to improve the performance by diminishing the extraction and comparison of unneeded data. How this is done depends a lot on the Source itself, but the general idea is to follow these steps: diff --git a/content/v1.9.x/connectors/pipeline/openlineage/index.md b/content/v1.9.x/connectors/pipeline/openlineage/index.md index c21f5e28d..597813c4c 100644 --- a/content/v1.9.x/connectors/pipeline/openlineage/index.md +++ b/content/v1.9.x/connectors/pipeline/openlineage/index.md @@ -134,7 +134,7 @@ openlineage_service_request = CreatePipelineServiceRequest( poolTimeout=3.0, sessionTimeout=60, securityProtocol=KafkaSecurityProtocol.SSL, - # below ssl confing in optional and used only when securityProtocol=KafkaSecurityProtocol.SSL + # below ssl config in optional and used only when securityProtocol=KafkaSecurityProtocol.SSL sslConfig=ValidateSslClientConfig( sslCertificate='/path/to/kafka/certs/Certificate.pem', sslKey='/path/to/kafka/certs/Key.pem', @@ -200,7 +200,7 @@ openlineage_service_request = CreatePipelineServiceRequest( poolTimeout=3.0, sessionTimeout=60, securityProtocol=KafkaSecurityProtocol.SSL, - # below ssl confing in optional and used only when securityProtocol=KafkaSecurityProtocol.SSL + # below ssl config in optional and used only when securityProtocol=KafkaSecurityProtocol.SSL sslConfig=ValidateSslClientConfig( sslCertificate='/path/to/kafka/certs/Certificate.pem', sslKey='/path/to/kafka/certs/Key.pem', @@ -264,7 +264,7 @@ openlineage_service_request = CreatePipelineServiceRequest( poolTimeout=3.0, sessionTimeout=60, securityProtocol=KafkaSecurityProtocol.SSL, - # below ssl confing in optional and used only when securityProtocol=KafkaSecurityProtocol.SSL + # below ssl config in optional and used only when securityProtocol=KafkaSecurityProtocol.SSL sslConfig=ValidateSslClientConfig( sslCertificate='/path/to/kafka/certs/Certificate.pem', sslKey='/path/to/kafka/certs/Key.pem', diff --git a/content/v1.9.x/deployment/kubernetes/aks.md b/content/v1.9.x/deployment/kubernetes/aks.md index 4cc29ad3c..402b87b84 100644 --- a/content/v1.9.x/deployment/kubernetes/aks.md +++ b/content/v1.9.x/deployment/kubernetes/aks.md @@ -184,7 +184,7 @@ kubectl create secret generic postgresql-secret ``` ### Step 6 - Install Openmetadata dependencies -The values-dependencies-yaml is used to overwride default values in the official helm chart and must be configured for customizing for use cases. Uncomment the externalDatabase section with meaningful values to connect to external database for production deployments. We set sensitive information like host address, DB name and DB username through the CLI. +The values-dependencies-yaml is used to override default values in the official helm chart and must be configured for customizing for use cases. Uncomment the externalDatabase section with meaningful values to connect to external database for production deployments. We set sensitive information like host address, DB name and DB username through the CLI. 
```yaml # values-dependencies.yaml diff --git a/content/v1.9.x/developers/contribute/developing-a-new-connector/define-json-schema.md b/content/v1.9.x/developers/contribute/developing-a-new-connector/define-json-schema.md index 8067613ee..1a5089530 100644 --- a/content/v1.9.x/developers/contribute/developing-a-new-connector/define-json-schema.md +++ b/content/v1.9.x/developers/contribute/developing-a-new-connector/define-json-schema.md @@ -124,7 +124,7 @@ On this connector we can see two different definitions: {% codeInfo srNumber=6 %} -* **additionalProperties**: To avoid werid behavior, we always prevent additionalProperties to be passed to the schema by setting this parameter to false. +* **additionalProperties**: To avoid weird behavior, we always prevent additionalProperties to be passed to the schema by setting this parameter to false. * **required**: Here we can define any properties that are always required or the schema would be invalid otherwise diff --git a/content/v1.9.x/how-to-guides/data-quality-observability/quality/tests-yaml.md b/content/v1.9.x/how-to-guides/data-quality-observability/quality/tests-yaml.md index 870cbd3fc..a004721c2 100644 --- a/content/v1.9.x/how-to-guides/data-quality-observability/quality/tests-yaml.md +++ b/content/v1.9.x/how-to-guides/data-quality-observability/quality/tests-yaml.md @@ -571,7 +571,7 @@ Accuracy **Properties** -* `column`: the column that will be used to chech the table freshness +* `column`: the column that will be used to check the table freshness * `timeSinceUpdate`: (in seconds) The data is expected to be updated within this number of seconds. If the time since the last update is greater than this value, the test will fail. **Behavior**