diff --git a/.openpublishing.redirection.azure-monitor.json b/.openpublishing.redirection.azure-monitor.json index 6e782eb19223..83639878a61a 100644 --- a/.openpublishing.redirection.azure-monitor.json +++ b/.openpublishing.redirection.azure-monitor.json @@ -65,6 +65,11 @@ "redirect_url": "/azure/azure-monitor/logs/cost-logs", "redirect_document_id": false }, + { + "source_path_from_root": "/articles/azure-monitor/logs/design-logs-deployment.md", + "redirect_url": "/azure/azure-monitor/logs/workspace-design", + "redirect_document_id": false + }, { "source_path_from_root": "/articles/azure-monitor/app/apm-tables.md", "redirect_url": "/azure/azure-monitor/app/convert-classic-resource#workspace-based-resource-changes", @@ -94,6 +99,26 @@ "source_path_from_root": "/articles/azure-monitor/containers/container-insights-azure-redhat4-setup.md" , "redirect_url": "/azure/azure-monitor/containers/container-insights-transition-hybrid", "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/azure-monitor/alerts/alerts-metric-overview.md" , + "redirect_url": "/azure/azure-monitor/alerts/alert-types.md#metric-alerts", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/azure-monitor/alerts/alerts-managing-alert-instances.md" , + "redirect_url": "/azure/azure-monitor/alerts/alerts-page.md", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/azure-monitor/alerts/alerts-unified-log.md" , + "redirect_url": "/azure/azure-monitor/alerts/alert-types.md#log-alerts", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/azure-monitor/alerts/activity-log-alerts.md" , + "redirect_url": "/azure/azure-monitor/alerts/alert-types.md#activity-log-alerts", + "redirect_document_id": false } ] } \ No newline at end of file diff --git a/.openpublishing.redirection.defender-for-cloud.json b/.openpublishing.redirection.defender-for-cloud.json index 61af983b7034..8967979a552c 100644 --- a/.openpublishing.redirection.defender-for-cloud.json +++ b/.openpublishing.redirection.defender-for-cloud.json @@ -15,6 +15,11 @@ "redirect_url": "/azure/defender-for-cloud/policy-reference", "redirect_document_id": false }, + { + "source_path_from_root": "/articles/security-center/security-center-identity-access.md", + "redirect_url": "/azure/defender-for-cloud/multi-factor-authentication-enforcement", + "redirect_document_id": false + }, { "source_path_from_root": "/articles/security-center/security-center-policy-definitions.md", "redirect_url": "/azure/defender-for-cloud/policy-reference", diff --git a/.openpublishing.redirection.json b/.openpublishing.redirection.json index 388ad9930099..27bcbcde2b8e 100644 --- a/.openpublishing.redirection.json +++ b/.openpublishing.redirection.json @@ -27419,6 +27419,11 @@ "redirect_url": "/azure/web-application-firewall/afds/afds-overview", "redirect_document_id": false }, + { + "source_path_from_root": "/articles/web-application-firewall/waf-cdn-create-portal.md", + "redirect_url": "/azure/web-application-firewall/cdn/cdn-overview", + "redirect_document_id": false + }, { "source_path_from_root": "/articles/frontdoor/waf-faq.md", "redirect_url": "/azure/web-application-firewall/afds/waf-faq", @@ -43290,9 +43295,24 @@ "redirect_document_id": true }, { - "source_path_from_root": "/articles/aks/web-app-routing.md", - "redirect_url": "/azure/aks/intro-kubernetes", - "redirect_document_id": false + "source_path_from_root": "/articles/virtual-network/nat-gateway/tutorial-create-nat-gateway-portal.md", + 
"redirect_url": "/azure/virtual-network/nat-gateway/quickstart-create-nat-gateway-portal", + "redirect_document_id": true + }, + { + "source_path_from_root": "/articles/virtual-network/nat-gateway/tutorial-create-nat-gateway-powershell.md", + "redirect_url": "/azure/virtual-network/nat-gateway/quickstart-create-nat-gateway-powershell", + "redirect_document_id": true + }, + { + "source_path_from_root": "/articles/virtual-network/nat-gateway/tutorial-create-nat-gateway-cli.md", + "redirect_url": "/azure/virtual-network/nat-gateway/quickstart-create-nat-gateway-cli", + "redirect_document_id": true + }, + { + "source_path_from_root": "/articles/private-link/private-endpoint-static-ip-powershell.md", + "redirect_url": "/azure/private-link/create-private-endpoint-powershell", + "redirect_document_id": true } ] } diff --git a/articles/active-directory-b2c/media/partner-gallery/asignio-logo.png b/articles/active-directory-b2c/media/partner-gallery/asignio-logo.png index 2e750c2d521e..02b085c532df 100644 Binary files a/articles/active-directory-b2c/media/partner-gallery/asignio-logo.png and b/articles/active-directory-b2c/media/partner-gallery/asignio-logo.png differ diff --git a/articles/active-directory-b2c/quickstart-web-app-dotnet.md b/articles/active-directory-b2c/quickstart-web-app-dotnet.md index 3336f333b6f4..3e842f3a13a9 100644 --- a/articles/active-directory-b2c/quickstart-web-app-dotnet.md +++ b/articles/active-directory-b2c/quickstart-web-app-dotnet.md @@ -85,7 +85,7 @@ Azure Active Directory B2C provides functionality to allow users to update their The ASP.NET web application includes an Azure AD access token in the request to the protected web API resource to perform operations on the user's to-do list items. -You've successfully used your Azure AD B2C user account to make an authorized call an Azure AD B2C protected web API. +You've successfully used your Azure AD B2C user account to make an authorized call to an Azure AD B2C protected web API. ## Next steps diff --git a/articles/active-directory/app-provisioning/on-premises-application-provisioning-architecture.md b/articles/active-directory/app-provisioning/on-premises-application-provisioning-architecture.md index 27b2d2c2aa64..f6580cdfaf66 100644 --- a/articles/active-directory/app-provisioning/on-premises-application-provisioning-architecture.md +++ b/articles/active-directory/app-provisioning/on-premises-application-provisioning-architecture.md @@ -142,6 +142,14 @@ Microsoft provides direct support for the latest agent version and one version b ### Download link You can download the latest version of the agent using [this link](https://aka.ms/onpremprovisioningagent). +### 1.1.892.0 + +May 20th, 2022 - released for download + +#### Fixed issues + +- We added support for exporting changes to integer attributes, which benefits customers using the generic LDAP connector. 
+ ### 1.1.846.0 April 11th, 2022 - released for download diff --git a/articles/active-directory/app-provisioning/use-scim-to-provision-users-and-groups.md b/articles/active-directory/app-provisioning/use-scim-to-provision-users-and-groups.md index 1575c83baafd..ce1d26ec4dc9 100644 --- a/articles/active-directory/app-provisioning/use-scim-to-provision-users-and-groups.md +++ b/articles/active-directory/app-provisioning/use-scim-to-provision-users-and-groups.md @@ -8,7 +8,7 @@ ms.service: active-directory ms.subservice: app-provisioning ms.workload: identity ms.topic: tutorial -ms.date: 04/13/2022 +ms.date: 05/25/2022 ms.author: kenwith ms.reviewer: arvinh --- @@ -1350,7 +1350,7 @@ The SCIM spec doesn't define a SCIM-specific scheme for authentication and autho |Username and password (not recommended or supported by Azure AD)|Easy to implement|Insecure - [Your Pa$$word doesn't matter](https://techcommunity.microsoft.com/t5/azure-active-directory-identity/your-pa-word-doesn-t-matter/ba-p/731984)|Not supported for new gallery or non-gallery apps.| |Long-lived bearer token|Long-lived tokens do not require a user to be present. They are easy for admins to use when setting up provisioning.|Long-lived tokens can be hard to share with an admin without using insecure methods such as email. |Supported for gallery and non-gallery apps. | |OAuth authorization code grant|Access tokens are much shorter-lived than passwords, and have an automated refresh mechanism that long-lived bearer tokens do not have. A real user must be present during initial authorization, adding a level of accountability. |Requires a user to be present. If the user leaves the organization, the token is invalid and authorization will need to be completed again.|Supported for gallery apps, but not non-gallery apps. However, you can provide an access token in the UI as the secret token for short term testing purposes. Support for OAuth code grant on non-gallery is in our backlog, in addition to support for configurable auth / token URLs on the gallery app.| -|OAuth client credentials grant|Access tokens are much shorter-lived than passwords, and have an automated refresh mechanism that long-lived bearer tokens do not have. Both the authorization code grant and the client credentials grant create the same type of access token, so moving between these methods is transparent to the API. Provisioning can be completely automated, and new tokens can be silently requested without user interaction. ||Not supported for gallery and non-gallery apps. Support is in our backlog.| +|OAuth client credentials grant|Access tokens are much shorter-lived than passwords, and have an automated refresh mechanism that long-lived bearer tokens do not have. Both the authorization code grant and the client credentials grant create the same type of access token, so moving between these methods is transparent to the API. Provisioning can be completely automated, and new tokens can be silently requested without user interaction. ||Supported for gallery apps, but not non-gallery apps. However, you can provide an access token in the UI as the secret token for short term testing purposes. Support for OAuth client credentials grant on non-gallery is in our backlog.| > [!NOTE] > It's not recommended to leave the token field blank in the Azure AD provisioning configuration custom app UI. The token generated is primarily available for testing purposes. 
diff --git a/articles/active-directory/authentication/concept-registration-mfa-sspr-combined.md b/articles/active-directory/authentication/concept-registration-mfa-sspr-combined.md index 79f05568ec44..92c2dfb10a88 100644 --- a/articles/active-directory/authentication/concept-registration-mfa-sspr-combined.md +++ b/articles/active-directory/authentication/concept-registration-mfa-sspr-combined.md @@ -6,7 +6,7 @@ services: active-directory ms.service: active-directory ms.subservice: authentication ms.topic: conceptual -ms.date: 03/1/2022 +ms.date: 05/24/2022 ms.author: justinha author: justinha @@ -73,6 +73,9 @@ Users can set one of the following options as the default Multi-Factor Authentic - Phone call - Text message +>[!NOTE] +>Virtual phone numbers are not supported for Voice calls or SMS messages. + Third party authenticator apps do not provide push notification. As we continue to add more authentication methods to Azure AD, those methods become available in combined registration. ## Combined registration modes diff --git a/articles/active-directory/authentication/how-to-mfa-number-match.md b/articles/active-directory/authentication/how-to-mfa-number-match.md index a01ffbe36d1d..60bc32c970a5 100644 --- a/articles/active-directory/authentication/how-to-mfa-number-match.md +++ b/articles/active-directory/authentication/how-to-mfa-number-match.md @@ -243,11 +243,10 @@ To enable number matching in the Azure AD portal, complete the following steps: ![Screenshot of enabling number match.](media/howto-authentication-passwordless-phone/enable-number-matching.png) >[!NOTE] ->[Least privilege role in Azure Active Directory - Multi-factor Authentication](https://docs.microsoft.com/azure/active-directory/roles/delegate-by-task#multi-factor-authentication) +>[Least privilege role in Azure Active Directory - Multi-factor Authentication](../roles/delegate-by-task.md#multi-factor-authentication) Number matching is not supported for Apple Watch notifications. Apple Watch need to use their phone to approve notifications when number matching is enabled. ## Next steps -[Authentication methods in Azure Active Directory - Microsoft Authenticator app](concept-authentication-authenticator-app.md) - +[Authentication methods in Azure Active Directory - Microsoft Authenticator app](concept-authentication-authenticator-app.md) \ No newline at end of file diff --git a/articles/active-directory/conditional-access/concept-condition-filters-for-devices.md b/articles/active-directory/conditional-access/concept-condition-filters-for-devices.md index 31dc70544f96..64af0e139410 100644 --- a/articles/active-directory/conditional-access/concept-condition-filters-for-devices.md +++ b/articles/active-directory/conditional-access/concept-condition-filters-for-devices.md @@ -23,7 +23,7 @@ There are multiple scenarios that organizations can now enable using filter for - **Restrict access to privileged resources**. For this example, lets say you want to allow access to Microsoft Azure Management from a user who is assigned a privilged role Global Admin, has satisfied multifactor authentication and accessing from a device that is [privileged or secure admin workstations](/security/compass/privileged-access-devices) and attested as compliant. 
For this scenario, organizations would create two Conditional Access policies: - Policy 1: All users with the directory role of Global administrator, accessing the Microsoft Azure Management cloud app, and for Access controls, Grant access, but require multifactor authentication and require device to be marked as compliant. - - Policy 2: All users with the directory role of Global administrator, accessing the Microsoft Azure Management cloud app, excluding a filter for devices using rule expression device.extensionAttribute1 equals SAW and for Access controls, Block. Learn how to [update extensionAttributes on an Azure AD device object](https://docs.microsoft.com/graph/api/device-update?view=graph-rest-1.0&tabs=http). + - Policy 2: All users with the directory role of Global administrator, accessing the Microsoft Azure Management cloud app, excluding a filter for devices using rule expression device.extensionAttribute1 equals SAW and for Access controls, Block. Learn how to [update extensionAttributes on an Azure AD device object](/graph/api/device-update?tabs=http&view=graph-rest-1.0). - **Block access to organization resources from devices running an unsupported Operating System**. For this example, lets say you want to block access to resources from Windows OS version older than Windows 10. For this scenario, organizations would create the following Conditional Access policy: - All users, accessing all cloud apps, excluding a filter for devices using rule expression device.operatingSystem equals Windows and device.operatingSystemVersion startsWith "10.0" and for Access controls, Block. - **Do not require multifactor authentication for specific accounts on specific devices**. For this example, lets say you want to not require multifactor authentication when using service accounts on specific devices like Teams phones or Surface Hub devices. For this scenario, organizations would create the following two Conditional Access policies: @@ -148,4 +148,4 @@ The filter for devices condition in Conditional Access evaluates policy based on - [Update device Graph API](/graph/api/device-update?tabs=http) - [Conditional Access: Conditions](concept-conditional-access-conditions.md) - [Common Conditional Access policies](concept-conditional-access-policy-common.md) -- [Securing devices as part of the privileged access story](/security/compass/privileged-access-devices) +- [Securing devices as part of the privileged access story](/security/compass/privileged-access-devices) \ No newline at end of file diff --git a/articles/active-directory/develop/includes/desktop-app/quickstart-uwp.md b/articles/active-directory/develop/includes/desktop-app/quickstart-uwp.md index 93d43e5640d9..5da76160054b 100644 --- a/articles/active-directory/develop/includes/desktop-app/quickstart-uwp.md +++ b/articles/active-directory/develop/includes/desktop-app/quickstart-uwp.md @@ -9,7 +9,7 @@ ms.service: active-directory ms.subservice: develop ms.topic: include ms.workload: identity -ms.date: 03/04/2022 +ms.date: 05/19/2022 ms.author: jmprieur ms.custom: aaddev, identityplatformtop40, "scenarios:getting-started", "languages:UWP", mode-api #Customer intent: As an application developer, I want to learn how my Universal Windows Platform (UWP) application can get an access token and call an API that's protected by the Microsoft identity platform. 
@@ -134,7 +134,7 @@ Some situations require forcing users to interact with the Microsoft identity pl - When two factor authentication is required ```csharp -authResult = await App.PublicClientApp.AcquireTokenInteractive(scopes) +authResult = await PublicClientApp.AcquireTokenInteractive(scopes) .ExecuteAsync(); ``` @@ -145,9 +145,9 @@ The `scopes` parameter contains the scopes being requested, such as `{ "user.rea Use the `AcquireTokenSilent` method to obtain tokens to access protected resources after the initial `AcquireTokenInteractive` method. You don’t want to require the user to validate their credentials every time they need to access a resource. Most of the time you want token acquisitions and renewal without any user interaction ```csharp -var accounts = await App.PublicClientApp.GetAccountsAsync(); +var accounts = await PublicClientApp.GetAccountsAsync(); var firstAccount = accounts.FirstOrDefault(); -authResult = await App.PublicClientApp.AcquireTokenSilent(scopes, firstAccount) +authResult = await PublicClientApp.AcquireTokenSilent(scopes, firstAccount) .ExecuteAsync(); ``` diff --git a/articles/active-directory/develop/includes/desktop-app/quickstart-windows-desktop.md b/articles/active-directory/develop/includes/desktop-app/quickstart-windows-desktop.md index a6e406c3f0b0..a2355021caf4 100644 --- a/articles/active-directory/develop/includes/desktop-app/quickstart-windows-desktop.md +++ b/articles/active-directory/develop/includes/desktop-app/quickstart-windows-desktop.md @@ -8,7 +8,7 @@ ms.service: active-directory ms.subservice: develop ms.topic: include ms.workload: identity -ms.date: 03/04/2022 +ms.date: 05/19/2022 ms.author: jmprieur ms.custom: aaddev, identityplatformtop40, mode-api #Customer intent: As an application developer, I want to learn how my Windows Presentation Foundation (WPF) application can get an access token and call an API that's protected by the Microsoft identity platform. @@ -134,7 +134,7 @@ Some situations require forcing users interact with the Microsoft identity platf - When two factor authentication is required ```csharp -authResult = await App.PublicClientApp.AcquireTokenInteractive(_scopes) +authResult = await app.AcquireTokenInteractive(_scopes) .ExecuteAsync(); ``` @@ -147,9 +147,9 @@ authResult = await App.PublicClientApp.AcquireTokenInteractive(_scopes) You don't want to require the user to validate their credentials every time they need to access a resource. Most of the time you want token acquisitions and renewal without any user interaction. 
You can use the `AcquireTokenSilent` method to obtain tokens to access protected resources after the initial `AcquireTokenInteractive` method: ```csharp -var accounts = await App.PublicClientApp.GetAccountsAsync(); +var accounts = await app.GetAccountsAsync(); var firstAccount = accounts.FirstOrDefault(); -authResult = await App.PublicClientApp.AcquireTokenSilent(scopes, firstAccount) +authResult = await app.AcquireTokenSilent(scopes, firstAccount) .ExecuteAsync(); ``` diff --git a/articles/active-directory/develop/msal-net-migration-confidential-client.md b/articles/active-directory/develop/msal-net-migration-confidential-client.md index 1a806c67f8e6..f3744f2ca404 100644 --- a/articles/active-directory/develop/msal-net-migration-confidential-client.md +++ b/articles/active-directory/develop/msal-net-migration-confidential-client.md @@ -5,7 +5,6 @@ description: Learn how to migrate a confidential client application from Azure A services: active-directory author: jmprieur manager: CelesteDG - ms.service: active-directory ms.subservice: develop ms.topic: how-to @@ -13,13 +12,13 @@ ms.workload: identity ms.date: 06/08/2021 ms.author: jmprieur ms.reviewer: saeeda, shermanouko -ms.custom: "devx-track-csharp, aaddev, has-adal-ref" +ms.custom: "devx-track-csharp, aaddev, has-adal-ref, kr2b-contr-experiment" #Customer intent: As an application developer, I want to migrate my confidential client app from ADAL.NET to MSAL.NET. --- # Migrate confidential client applications from ADAL.NET to MSAL.NET -This article describes how to migrate a confidential client application from Azure Active Directory Authentication Library for .NET (ADAL.NET) to Microsoft Authentication Library for .NET (MSAL.NET). Confidential client applications are web apps, web APIs, and daemon applications that call another service on their own behalf. For more information about confidential applications, see [Authentication flows and application scenarios](authentication-flows-app-scenarios.md). If your app is based on ASP.NET Core, use [Microsoft.Identity.Web](microsoft-identity-web.md). +In this how-to guide you'll migrate a confidential client application from Azure Active Directory Authentication Library for .NET (ADAL.NET) to Microsoft Authentication Library for .NET (MSAL.NET). Confidential client applications include web apps, web APIs, and daemon applications that call another service on their own behalf. For more information about confidential apps, see [Authentication flows and application scenarios](authentication-flows-app-scenarios.md). If your app is based on ASP.NET Core, see [Microsoft.Identity.Web](microsoft-identity-web.md). For app registrations: @@ -28,24 +27,24 @@ For app registrations: ## Migration steps -1. Find the code by using ADAL.NET in your app. +1. Find the code that uses ADAL.NET in your app. - The code that uses ADAL in a confidential client application instantiates `AuthenticationContext` and calls either `AcquireTokenByAuthorizationCode` or one override of `AcquireTokenAsync` with the following parameters: + The code that uses ADAL in a confidential client app instantiates `AuthenticationContext` and calls either `AcquireTokenByAuthorizationCode` or one override of `AcquireTokenAsync` with the following parameters: - A `resourceId` string. This variable is the app ID URI of the web API that you want to call. - An instance of `IClientAssertionCertificate` or `ClientAssertion`. This instance provides the client credentials for your app to prove the identity of your app. -1. 
After you've identified that you have apps that are using ADAL.NET, install the MSAL.NET NuGet package [Microsoft.Identity.Client](https://www.nuget.org/packages/Microsoft.Identity.Client) and update your project library references. For more information, see [Install a NuGet package](https://www.bing.com/search?q=install+nuget+package). If you want to use token cache serializers, also install [Microsoft.Identity.Web.TokenCache](https://www.nuget.org/packages/Microsoft.Identity.Web.TokenCache). +1. After you've identified that you have apps that are using ADAL.NET, install the MSAL.NET NuGet package [Microsoft.Identity.Client](https://www.nuget.org/packages/Microsoft.Identity.Client) and update your project library references. For more information, see [Install a NuGet package](https://www.bing.com/search?q=install+nuget+package). To use token cache serializers, install [Microsoft.Identity.Web.TokenCache](https://www.nuget.org/packages/Microsoft.Identity.Web.TokenCache). 1. Update the code according to the confidential client scenario. Some steps are common and apply across all the confidential client scenarios. Other steps are unique to each scenario. - The confidential client scenarios are: + Confidential client scenarios: - [Daemon scenarios](?tabs=daemon#migrate-daemon-apps) supported by web apps, web APIs, and daemon console applications. - [Web API calling downstream web APIs](?tabs=obo#migrate-a-web-api-that-calls-downstream-web-apis) supported by web APIs calling downstream web APIs on behalf of the user. - [Web app calling web APIs](?tabs=authcode#migrate-a-web-api-that-calls-downstream-web-apis) supported by web apps that sign in users and call a downstream web API. -You might have provided a wrapper around ADAL.NET to handle certificates and caching. This article uses the same approach to illustrate the process of migrating from ADAL.NET to MSAL.NET. However, this code is only for demonstration purposes. Don't copy/paste these wrappers or integrate them in your code as they are. +You might have provided a wrapper around ADAL.NET to handle certificates and caching. This guide uses the same approach to illustrate the process of migrating from ADAL.NET to MSAL.NET. However, this code is only for demonstration purposes. Don't copy/paste these wrappers or integrate them in your code as they are. ## [Daemon](#tab/daemon) @@ -60,13 +59,13 @@ The ADAL code for your app uses daemon scenarios if it contains a call to `Authe - A resource (app ID URI) as a first parameter - `IClientAssertionCertificate` or `ClientAssertion` as the second parameter -`AuthenticationContext.AcquireTokenAsync` doesn't have a parameter of type `UserAssertion`. If it does, then your app is a web API, and it's using the [web API calling downstream web APIs](?tabs=obo#migrate-a-web-api-that-calls-downstream-web-apis) scenario. +`AuthenticationContext.AcquireTokenAsync` doesn't have a parameter of type `UserAssertion`. If it does, then your app is a web API, and it uses the [web API calling downstream web APIs](?tabs=obo#migrate-a-web-api-that-calls-downstream-web-apis) scenario. #### Update the code of daemon scenarios [!INCLUDE [Common steps](includes/msal-net-adoption-steps-confidential-clients.md)] -In this case, we replace the call to `AuthenticationContext.AcquireTokenAsync` with a call to `IConfidentialClientApplication.AcquireTokenClient`. +In this case, replace the call to `AuthenticationContext.AcquireTokenAsync` with a call to `IConfidentialClientApplication.AcquireTokenClient`. 
Here's a comparison of ADAL.NET and MSAL.NET code for daemon scenarios: @@ -160,9 +159,9 @@ public partial class AuthWrapper #### Benefit from token caching -To benefit from the in-memory cache, the instance of `IConfidentialClientApplication` needs to be kept in a member variable. If you re-create the confidential client application each time you request a token, you won't benefit from the token cache. +To benefit from the in-memory cache, the instance of `IConfidentialClientApplication` must be kept in a member variable. If you re-create the confidential client app each time you request a token, you won't benefit from the token cache. -You'll need to serialize `AppTokenCache` if you choose not to use the default in-memory app token cache. Similarly, If you want to implement a distributed token cache, you'll need to serialize `AppTokenCache`. For details, see [Token cache for a web app or web API (confidential client application)](msal-net-token-cache-serialization.md?tabs=aspnet) and the sample [active-directory-dotnet-v1-to-v2/ConfidentialClientTokenCache](https://github.com/Azure-Samples/active-directory-dotnet-v1-to-v2/tree/master/ConfidentialClientTokenCache). +You'll need to serialize `AppTokenCache` if you don't use the default in-memory app token cache. Similarly, If you want to implement a distributed token cache, serialize `AppTokenCache`. For details, see [Token cache for a web app or web API (confidential client application)](msal-net-token-cache-serialization.md?tabs=aspnet) and the sample [active-directory-dotnet-v1-to-v2/ConfidentialClientTokenCache](https://github.com/Azure-Samples/active-directory-dotnet-v1-to-v2/tree/master/ConfidentialClientTokenCache). [Learn more about the daemon scenario](scenario-daemon-overview.md) and how it's implemented with MSAL.NET or Microsoft.Identity.Web in new applications. @@ -285,25 +284,25 @@ public partial class AuthWrapper #### Benefit from token caching -For token caching in OBOs, you need to use a distributed token cache. For details, see [Token cache for a web app or web API (confidential client application)](msal-net-token-cache-serialization.md?tabs=aspnet) and read through [sample code](https://github.com/Azure-Samples/active-directory-dotnet-v1-to-v2/tree/master/ConfidentialClientTokenCache). +For token caching in OBOs, use a distributed token cache. For details, see [Token cache for a web app or web API (confidential client app)](msal-net-token-cache-serialization.md?tabs=aspnet) and read through [sample code](https://github.com/Azure-Samples/active-directory-dotnet-v1-to-v2/tree/master/ConfidentialClientTokenCache). ```CSharp app.UseInMemoryTokenCaches(); // or a distributed token cache. ``` -[Learn more about web APIs calling downstream web APIs](scenario-web-api-call-api-overview.md) and how they're implemented with MSAL.NET or Microsoft.Identity.Web in new applications. +[Learn more about web APIs calling downstream web APIs](scenario-web-api-call-api-overview.md) and how they're implemented with MSAL.NET or Microsoft.Identity.Web in new apps. ## [Web app calling web APIs](#tab/authcode) ### Migrate a web app that calls web APIs -If your app uses ASP.NET Core, we strongly recommend that you update to Microsoft.Identity.Web, which processes everything for you. For a quick presentation, see the [Microsoft.Identity.Web announcement of general availability](https://github.com/AzureAD/microsoft-identity-web/wiki/1.0.0). 
For details about how to use it in a web app, see [Why use Microsoft.Identity.Web in web apps?](https://aka.ms/ms-id-web/webapp). +If your app uses ASP.NET Core, we strongly recommend that you update to Microsoft.Identity.Web because it processes everything for you. For a quick presentation, see the [Microsoft.Identity.Web announcement of general availability](https://github.com/AzureAD/microsoft-identity-web/wiki/1.0.0). For details about how to use it in a web app, see [Why use Microsoft.Identity.Web in web apps?](https://aka.ms/ms-id-web/webapp). -Web apps that sign in users and call web APIs on behalf of users use the OAuth2.0 [authorization code flow](v2-oauth2-auth-code-flow.md). Typically: +Web apps that sign in users and call web APIs on behalf of users employ the OAuth2.0 [authorization code flow](v2-oauth2-auth-code-flow.md). Typically: -1. The web app signs in a user by executing a first leg of the authorization code flow. It does this by going to the Microosft identity platform authorize endpoint. The user signs in and performs multifactor authentications if needed. As an outcome of this operation, the app receives the authorization code. The authentication library is not used at this stage. +1. The app signs in a user by executing a first leg of the authorization code flow by going to the Microsoft identity platform authorize endpoint. The user signs in and performs multi-factor authentications if needed. As an outcome of this operation, the app receives the authorization code. The authentication library isn't used at this stage. 1. The app executes the second leg of the authorization code flow. It uses the authorization code to get an access token, an ID token, and a refresh token. Your application needs to provide the `redirectUri` value, which is the URI where the Microsoft identity platform endpoint will provide the security tokens. After the app receives that URI, it typically calls `AcquireTokenByAuthorizationCode` for ADAL or MSAL to redeem the code and to get a token that will be stored in the token cache. -1. The app uses ADAL or MSAL to call `AcquireTokenSilent` so that it can get tokens for calling the necessary web APIs. This is done from the web app controllers. +1. The app uses ADAL or MSAL to call `AcquireTokenSilent` to get tokens for calling the necessary web APIs from the web app controllers. #### Find out if your code uses the auth code flow @@ -313,7 +312,7 @@ The ADAL code for your app uses auth code flow if it contains a call to `Authent [!INCLUDE [Common steps](includes/msal-net-adoption-steps-confidential-clients.md)] -In this case, we replace the call to `AuthenticationContext.AcquireTokenAsync` with a call to `IConfidentialClientApplication.AcquireTokenByAuthorizationCode`. +In this case, replace the call to `AuthenticationContext.AcquireTokenAsync` with a call to `IConfidentialClientApplication.AcquireTokenByAuthorizationCode`. Here's a comparison of sample authorization code flows for ADAL.NET and MSAL.NET: @@ -460,7 +459,7 @@ public partial class AuthWrapper #### Benefit from token caching -Because your web app uses `AcquireTokenByAuthorizationCode`, your app needs to use a distributed token cache for token caching. For details, see [Token cache for a web app or web API](msal-net-token-cache-serialization.md?tabs=aspnet) and read through [sample code](https://github.com/Azure-Samples/active-directory-dotnet-v1-to-v2/tree/master/ConfidentialClientTokenCache). 
+Because your web app uses `AcquireTokenByAuthorizationCode`, it needs to use a distributed token cache for token caching. For details, see [Token cache for a web app or web API](msal-net-token-cache-serialization.md?tabs=aspnet) and read through [sample code](https://github.com/Azure-Samples/active-directory-dotnet-v1-to-v2/tree/master/ConfidentialClientTokenCache). ```CSharp @@ -470,9 +469,9 @@ app.UseInMemoryTokenCaches(); // or a distributed token cache. #### Handling MsalUiRequiredException When your controller attempts to acquire a token silently for different -scopes/resources, MSAL.NET might throw an `MsalUiRequiredException`. This is expected if, for instance, the user needs to re-sign-in, or if the +scopes/resources, MSAL.NET might throw an `MsalUiRequiredException` as expected if the user needs to re-sign-in, or if the access to the resource requires more claims (because of a conditional access -policy for instance). For details on mitigation see how to [Handle errors and exceptions in MSAL.NET](msal-error-handling-dotnet.md). +policy). For details on mitigation see how to [Handle errors and exceptions in MSAL.NET](msal-error-handling-dotnet.md). [Learn more about web apps calling web APIs](scenario-web-app-call-api-overview.md) and how they're implemented with MSAL.NET or Microsoft.Identity.Web in new applications. @@ -482,14 +481,14 @@ policy for instance). For details on mitigation see how to [Handle errors and ex Key benefits of MSAL.NET for your app include: -- **Resilience**. MSAL.NET helps make your app resilient through the following: +- **Resilience**. MSAL.NET helps make your app resilient through: - - Azure AD Cached Credential Service (CCS) benefits. CCS operates as an Azure AD backup. - - Proactive renewal of tokens if the API that you call enables long-lived tokens through [continuous access evaluation](app-resilience-continuous-access-evaluation.md). + - Azure AD Cached Credential Service (CCS) benefits. CCS operates as an Azure AD backup. + - Proactive renewal of tokens if the API that you call enables long-lived tokens through [continuous access evaluation](app-resilience-continuous-access-evaluation.md). - **Security**. You can acquire Proof of Possession (PoP) tokens if the web API that you want to call requires it. For details, see [Proof Of Possession tokens in MSAL.NET](https://github.com/AzureAD/microsoft-authentication-library-for-dotnet/wiki/Proof-Of-Possession-(PoP)-tokens) -- **Performance and scalability**. If you don't need to share your cache with ADAL.NET, disable the legacy cache compatibility when you're creating the confidential client application (`.WithLegacyCacheCompatibility(false)`). This increases the performance significantly. +- **Performance and scalability**. If you don't need to share your cache with ADAL.NET, disable the legacy cache compatibility when you're creating the confidential client application (`.WithLegacyCacheCompatibility(false)`) to significantly increase performance. ```csharp app = ConfidentialClientApplicationBuilder.Create(ClientId) @@ -516,14 +515,14 @@ If you get an exception with either of the following messages: > `subscriptions for the tenant. Check to make sure you have the correct tenant ID. Check with your subscription` > `administrator.` -You can troubleshoot the exception by using these steps: +Troubleshoot the exception using these steps: 1. Confirm that you're using the latest version of [MSAL.NET](https://www.nuget.org/packages/Microsoft.Identity.Client/). -1. 
Confirm that the authority host that you set when building the confidential client application and the authority host that you used with ADAL are similar. In particular, is it the same [cloud](msal-national-cloud.md) (Azure Government, Azure China 21Vianet, or Azure Germany)? +1. Confirm that the authority host that you set when building the confidential client app and the authority host that you used with ADAL are similar. In particular, is it the same [cloud](msal-national-cloud.md) (Azure Government, Azure China 21Vianet, or Azure Germany)? ### MsalClientException -In multi-tenant applications, you can have scenarios where you specify a common authority when building the application, but then want to target a specific tenant (for instance the tenant of the user) when calling a web API. Since MSAL.NET 4.37.0, when you specify `.WithAzureRegion` at the application creation, you can no longer specify the Authority using `.WithAuthority` during the token requests. If you do, you'll get the following error when updating from previous versions of MSAL.NET: +In multi-tenant apps, you might specify a common authority when building the app but then want to target a specific tenant, such as the tenant of the user, when calling a web API. Since MSAL.NET 4.37.0, when you specify `.WithAzureRegion` at the app creation, you can no longer specify the Authority using `.WithAuthority` during the token requests. If you do, you'll get the following error when updating from previous versions of MSAL.NET: `MsalClientException - "You configured WithAuthority at the request level, and also WithAzureRegion. This is not supported when the environment changes from application to request. Use WithTenantId at the request level instead."` diff --git a/articles/active-directory/develop/msal-net-token-cache-serialization.md b/articles/active-directory/develop/msal-net-token-cache-serialization.md index d844e6b6eabe..c15f81595a79 100644 --- a/articles/active-directory/develop/msal-net-token-cache-serialization.md +++ b/articles/active-directory/develop/msal-net-token-cache-serialization.md @@ -278,7 +278,7 @@ You can also specify options to limit the size of the in-memory token cache: #### Distributed caches -If you use `app.AddDistributedTokenCache`, the token cache is an adapter against the .NET `IDistributedCache` implementation. So you can choose between a SQL Server cache, a Redis cache, an Azure Cosmos DB cache, or any other cache implementing the [IDistributedCache](https://docs.microsoft.com/dotnet/api/microsoft.extensions.caching.distributed.idistributedcache?view=dotnet-plat-ext-6.0) interface. +If you use `app.AddDistributedTokenCache`, the token cache is an adapter against the .NET `IDistributedCache` implementation. So you can choose between a SQL Server cache, a Redis cache, an Azure Cosmos DB cache, or any other cache implementing the [IDistributedCache](/dotnet/api/microsoft.extensions.caching.distributed.idistributedcache?view=dotnet-plat-ext-6.0) interface. For testing purposes only, you may want to use `services.AddDistributedMemoryCache()`, an in-memory implementation of `IDistributedCache`. @@ -709,4 +709,4 @@ The following samples illustrate token cache serialization. | ------ | -------- | ----------- | |[active-directory-dotnet-desktop-msgraph-v2](https://github.com/azure-samples/active-directory-dotnet-desktop-msgraph-v2) | Desktop (WPF) | Windows Desktop .NET (WPF) application that calls the Microsoft Graph API. 
![Diagram that shows a topology with a desktop app client flowing to Azure Active Directory by acquiring a token interactively and to Microsoft Graph.](media/msal-net-token-cache-serialization/topology.png)| |[active-directory-dotnet-v1-to-v2](https://github.com/Azure-Samples/active-directory-dotnet-v1-to-v2) | Desktop (console) | Set of Visual Studio solutions that illustrate the migration of Azure AD v1.0 applications (using ADAL.NET) to Microsoft identity platform applications (using MSAL.NET). In particular, see [Token cache migration](https://github.com/Azure-Samples/active-directory-dotnet-v1-to-v2/blob/master/TokenCacheMigration/README.md) and [Confidential client token cache](https://github.com/Azure-Samples/active-directory-dotnet-v1-to-v2/tree/master/ConfidentialClientTokenCache). | -[ms-identity-aspnet-webapp-openidconnect](https://github.com/Azure-Samples/ms-identity-aspnet-webapp-openidconnect) | ASP.NET (net472) | Example of token cache serialization in an ASP.NET MVC application (using MSAL.NET). In particular, see [MsalAppBuilder](https://github.com/Azure-Samples/ms-identity-aspnet-webapp-openidconnect/blob/master/WebApp/Utils/MsalAppBuilder.cs). +[ms-identity-aspnet-webapp-openidconnect](https://github.com/Azure-Samples/ms-identity-aspnet-webapp-openidconnect) | ASP.NET (net472) | Example of token cache serialization in an ASP.NET MVC application (using MSAL.NET). In particular, see [MsalAppBuilder](https://github.com/Azure-Samples/ms-identity-aspnet-webapp-openidconnect/blob/master/WebApp/Utils/MsalAppBuilder.cs). \ No newline at end of file diff --git a/articles/active-directory/develop/multi-service-web-app-access-microsoft-graph-as-user.md b/articles/active-directory/develop/multi-service-web-app-access-microsoft-graph-as-user.md index 6102c64a3089..eff929a81bf2 100644 --- a/articles/active-directory/develop/multi-service-web-app-access-microsoft-graph-as-user.md +++ b/articles/active-directory/develop/multi-service-web-app-access-microsoft-graph-as-user.md @@ -130,7 +130,7 @@ Using the [Microsoft.Identity.Web library](https://github.com/AzureAD/microsoft- To see this code as part of a sample application, see the [sample on GitHub](https://github.com/Azure-Samples/ms-identity-easyauth-dotnet-storage-graphapi/tree/main/2-WebApp-graphapi-on-behalf). > [!NOTE] -> The Microsoft.Identity.Web library isn't required in your web app for basic authentication/authorization or to authenticate requests with Microsoft Graph. It's possible to [securely call downstream APIs](/azure/app-service/tutorial-auth-aad#call-api-securely-from-server-code) with only the App Service authentication/authorization module enabled. +> The Microsoft.Identity.Web library isn't required in your web app for basic authentication/authorization or to authenticate requests with Microsoft Graph. It's possible to [securely call downstream APIs](../../app-service/tutorial-auth-aad.md#call-api-securely-from-server-code) with only the App Service authentication/authorization module enabled. > > However, the App Service authentication/authorization is designed for more basic authentication scenarios. For more complex scenarios (handling custom claims, for example), you need the Microsoft.Identity.Web library or [Microsoft Authentication Library](msal-overview.md). There's a little more setup and configuration work in the beginning, but the Microsoft.Identity.Web library can run alongside the App Service authentication/authorization module. 
Later, when your web app needs to handle more complex scenarios, you can disable the App Service authentication/authorization module and Microsoft.Identity.Web will already be a part of your app. diff --git a/articles/active-directory/develop/multi-service-web-app-authentication-app-service.md b/articles/active-directory/develop/multi-service-web-app-authentication-app-service.md index ce52b322df7f..a6702d3e9b37 100644 --- a/articles/active-directory/develop/multi-service-web-app-authentication-app-service.md +++ b/articles/active-directory/develop/multi-service-web-app-authentication-app-service.md @@ -23,7 +23,7 @@ Learn how to enable authentication for your web app running on Azure App Service App Service provides built-in authentication and authorization support, so you can sign in users and access data by writing minimal or no code in your web app. Using the App Service authentication/authorization module isn't required, but helps simplify authentication and authorization for your app. This article shows how to secure your web app with the App Service authentication/authorization module by using Azure Active Directory (Azure AD) as the identity provider. -The authentication/authorization module is enabled and configured through the Azure portal and app settings. No SDKs, specific languages, or changes to application code are required.​ A variety of identity providers are supported, which includes Azure AD, Microsoft Account, Facebook, Google, and Twitter​​. When the authentication/authorization module is enabled, every incoming HTTP request passes through it before being handled by app code.​​ To learn more, see [Authentication and authorization in Azure App Service](/azure/app-service/overview-authentication-authorization.md). +The authentication/authorization module is enabled and configured through the Azure portal and app settings. No SDKs, specific languages, or changes to application code are required.​ A variety of identity providers are supported, which includes Azure AD, Microsoft Account, Facebook, Google, and Twitter​​. When the authentication/authorization module is enabled, every incoming HTTP request passes through it before being handled by app code.​​ To learn more, see [Authentication and authorization in Azure App Service](../../app-service/overview-authentication-authorization.md). In this tutorial, you learn how to: @@ -38,7 +38,7 @@ In this tutorial, you learn how to: ## Create and publish a web app on App Service -For this tutorial, you need a web app deployed to App Service. You can use an existing web app, or you can follow one of the [ASP.NET Core](/azure/app-service/quickstart-dotnetcore), [Node.js](/azure/app-service/quickstart-nodejs), [Python](/azure/app-service/quickstart-python), or [Java](/azure/app-service/quickstart-java) quickstarts to create and publish a new web app to App Service. +For this tutorial, you need a web app deployed to App Service. You can use an existing web app, or you can follow one of the [ASP.NET Core](../../app-service/quickstart-dotnetcore.md), [Node.js](../../app-service/quickstart-nodejs.md), [Python](../../app-service/quickstart-python.md), or [Java](../../app-service/quickstart-java.md) quickstarts to create and publish a new web app to App Service. Whether you use an existing web app or create a new one, take note of the following: @@ -49,7 +49,7 @@ You need these names throughout this tutorial. ## Configure authentication and authorization -You now have a web app running on App Service. 
Next, you enable authentication and authorization for the web app. You use Azure AD as the identity provider. For more information, see [Configure Azure AD authentication for your App Service application](/azure/app-service/configure-authentication-provider-aad.md). +You now have a web app running on App Service. Next, you enable authentication and authorization for the web app. You use Azure AD as the identity provider. For more information, see [Configure Azure AD authentication for your App Service application](../../app-service/configure-authentication-provider-aad.md). In the [Azure portal](https://portal.azure.com) menu, select **Resource groups**, or search for and select **Resource groups** from any page. diff --git a/articles/active-directory/develop/scenario-spa-acquire-token.md b/articles/active-directory/develop/scenario-spa-acquire-token.md index 87cc6370f709..5cafafffebb7 100644 --- a/articles/active-directory/develop/scenario-spa-acquire-token.md +++ b/articles/active-directory/develop/scenario-spa-acquire-token.md @@ -154,6 +154,8 @@ For success and failure of the silent token acquisition, MSAL Angular provides e import { MsalBroadcastService } from '@azure/msal-angular'; import { EventMessage, EventType } from '@azure/msal-browser'; +import { filter, Subject, takeUntil } from 'rxjs'; + // In app.component.ts export class AppComponent implements OnInit { private readonly _destroying$ = new Subject(); @@ -226,7 +228,7 @@ For success and failure of the silent token acquisition, MSAL Angular provides c ```javascript // In app.component.ts ngOnInit() { - this.subscription= this.broadcastService.subscribe("msal:acquireTokenFailure", (payload) => { + this.subscription = this.broadcastService.subscribe("msal:acquireTokenFailure", (payload) => { }); } ngOnDestroy() { @@ -394,15 +396,18 @@ You can use optional claims for the following purposes: To request optional claims in `IdToken`, you can send a stringified claims object to the `claimsRequest` field of the `AuthenticationParameters.ts` class. ```javascript -"optionalClaims": - { - "idToken": [ - { - "name": "auth_time", - "essential": true - } - ], - +var claims = { + optionalClaims: + { + idToken: [ + { + name: "auth_time", + essential: true + } + ], + } +}; + var request = { scopes: ["user.read"], claimsRequest: JSON.stringify(claims) diff --git a/articles/active-directory/external-identities/cross-cloud-settings.md b/articles/active-directory/external-identities/cross-cloud-settings.md index 907b1bd5c59c..66b5a7623d54 100644 --- a/articles/active-directory/external-identities/cross-cloud-settings.md +++ b/articles/active-directory/external-identities/cross-cloud-settings.md @@ -44,9 +44,16 @@ After each organization has completed these steps, Azure AD B2B collaboration be In your Microsoft cloud settings, enable the Microsoft Azure cloud you want to collaborate with. +> [!NOTE] +> The admin experience is currently still deploying to national clouds. To access the admin experience in Microsoft Azure Government or Microsoft Azure China, you can use these links: +> +>Microsoft Azure Government - https://aka.ms/cloudsettingsusgov +> +>Microsoft Azure China - https://aka.ms/cloudsettingschina + 1. Sign in to the [Azure portal](https://portal.azure.com) using a Global administrator or Security administrator account. Then open the **Azure Active Directory** service. 1. Select **External Identities**, and then select **Cross-tenant access settings (Preview)**. -1. Select **Cross cloud settings**. +1. 
Select **Microsoft cloud settings (Preview)**. 1. Select the checkboxes next to the external Microsoft Azure clouds you want to enable. ![Screenshot showing Microsoft cloud settings.](media/cross-cloud-settings/cross-cloud-settings.png) diff --git a/articles/active-directory/external-identities/cross-tenant-access-overview.md b/articles/active-directory/external-identities/cross-tenant-access-overview.md index 79006efa5192..309af5bfd30a 100644 --- a/articles/active-directory/external-identities/cross-tenant-access-overview.md +++ b/articles/active-directory/external-identities/cross-tenant-access-overview.md @@ -71,6 +71,13 @@ To set up B2B collaboration, both organizations configure their Microsoft cloud For configuration steps, see [Configure Microsoft cloud settings for B2B collaboration (Preview)](cross-cloud-settings.md). +> [!NOTE] +> The admin experience is currently still deploying to national clouds. To access the admin experience in Microsoft Azure Government or Microsoft Azure China, you can use these links: +> +>Microsoft Azure Government - https://aka.ms/cloudsettingsusgov +> +>Microsoft Azure China - https://aka.ms/cloudsettingschina + ### Default settings in cross-cloud scenarios To collaborate with a partner tenant in a different Microsoft Azure cloud, both organizations need to mutually enable B2B collaboration with each other. The first step is to enable the partner's cloud in your cross-tenant settings. When you first enable another cloud, B2B collaboration is blocked for all tenants in that cloud. You need to add the tenant you want to collaborate with to your Organizational settings, and at that point your default settings go into effect for that tenant only. You can allow the default settings to remain in effect, or you can modify the organizational settings for the tenant. diff --git a/articles/active-directory/fundamentals/5-secure-access-b2b.md b/articles/active-directory/fundamentals/5-secure-access-b2b.md index dd24d16b2494..c7a9bdc740dc 100644 --- a/articles/active-directory/fundamentals/5-secure-access-b2b.md +++ b/articles/active-directory/fundamentals/5-secure-access-b2b.md @@ -76,7 +76,7 @@ You can use an allowlist or blocklist to [restrict invitations to B2B users](../ > Limiting to a predefined domain may inadvertently prevent authorized collaboration with organizations, which have other domains for their users. For example, if doing business with an organization Contoso, the initial point of contact with Contoso might be one of their US-based employees who has an email with a ".com" domain. However, if you only allow the ".com" domain you may inadvertently omit their Canadian employees who have ".ca" domain. > [!IMPORTANT] -> These lists do not apply to users who are already in your directory. By default, they also do not apply to OneDrive for Business and SharePoint allow/blocklists which are separate unless you enable the [SharePoint/OneDrive B2B integration](https://docs.microsoft.com/sharepoint/sharepoint-azureb2b-integration). +> These lists do not apply to users who are already in your directory. By default, they also do not apply to OneDrive for Business and SharePoint allow/blocklists which are separate unless you enable the [SharePoint/OneDrive B2B integration](/sharepoint/sharepoint-azureb2b-integration). Some organizations use a list of known ‘bad actor’ domains provided by their managed security provider for their blocklist. 
For example, if the organization is legitimately doing business with Contoso and using a .com domain, there may be an unrelated organization that has been using the Contoso .org domain and attempting a phishing attack to impersonate Contoso employees. @@ -254,4 +254,4 @@ See the following articles on securing external access to resources. We recommen 8. [Secure access with Sensitivity labels](8-secure-access-sensitivity-labels.md) -9. [Secure access to Microsoft Teams, OneDrive, and SharePoint](9-secure-access-teams-sharepoint.md) +9. [Secure access to Microsoft Teams, OneDrive, and SharePoint](9-secure-access-teams-sharepoint.md) \ No newline at end of file diff --git a/articles/active-directory/fundamentals/add-custom-domain.md b/articles/active-directory/fundamentals/add-custom-domain.md index 8cf9ad9925ad..bd12103153c5 100644 --- a/articles/active-directory/fundamentals/add-custom-domain.md +++ b/articles/active-directory/fundamentals/add-custom-domain.md @@ -56,6 +56,8 @@ After you create your directory, you can add your custom domain name. >[!IMPORTANT] >You must include *.com*, *.net*, or any other top-level extension for this to work properly. + > + >When adding a custom domain, the Password Policy values will be inherited from the initial domain. The unverified domain is added. The **contoso.com** page appears showing your DNS information. Save this information. You need it later to create a TXT record to configure DNS. @@ -114,4 +116,4 @@ If Azure AD can't verify a custom domain name, try the following suggestions: - Manage your domain name information in Azure AD. For more information, see [Managing custom domain names](../enterprise-users/domains-manage.md). -- If you have on-premises versions of Windows Server that you want to use alongside Azure Active Directory, see [Integrate your on-premises directories with Azure Active Directory](../hybrid/whatis-hybrid-identity.md). \ No newline at end of file +- If you have on-premises versions of Windows Server that you want to use alongside Azure Active Directory, see [Integrate your on-premises directories with Azure Active Directory](../hybrid/whatis-hybrid-identity.md). diff --git a/articles/active-directory/fundamentals/whats-new.md b/articles/active-directory/fundamentals/whats-new.md index 1e638a150c0a..247970621f60 100644 --- a/articles/active-directory/fundamentals/whats-new.md +++ b/articles/active-directory/fundamentals/whats-new.md @@ -71,15 +71,6 @@ With a recent improvement, Smart Lockout now synchronizes the lockout state acro --- -### Public Preview - Enabling customization capabilities for the Self-Service Password Reset (SSPR) hyperlinks, footer hyperlinks and browser icons in Company Branding. - -**Type:** New feature -**Service category:** Authentications (Logins) -**Product capability:** User Authentication - -Updating the Company Branding functionality on the Azure AD/Microsoft 365 sign-in experience to allow customizing Self Service Password Reset (SSPR) hyperlinks, footer hyperlinks and browser icon. For more information, see: [Add branding to your organization’s Azure Active Directory sign-in page](customize-branding.md). 
- ---- ### Public Preview - Integration of Microsoft 365 App Certification details into AAD UX and Consent Experiences @@ -92,15 +83,6 @@ Microsoft 365 Certification status for an app is now available in Azure AD conse --- -### Public Preview - Organizations can replace all references to Microsoft on the AAD auth experience - -**Type:** New feature -**Service category:** Authentications (Logins) -**Product capability:** User Authentication - -Updating the Company Branding functionality on the Azure AD/Microsoft 365 sign-in experience to allow customizing Self Service Password Reset (SSPR) hyperlinks, footer hyperlinks and browser icon. For more information, see: [Add branding to your organization’s Azure Active Directory sign-in page](customize-branding.md). - ---- ### Public preview - Use Azure AD access reviews to review access of B2B direct connect users in Teams shared channels diff --git a/articles/active-directory/governance/entitlement-management-logs-and-reporting.md b/articles/active-directory/governance/entitlement-management-logs-and-reporting.md index 13308b2917a9..9a50a5b979a8 100644 --- a/articles/active-directory/governance/entitlement-management-logs-and-reporting.md +++ b/articles/active-directory/governance/entitlement-management-logs-and-reporting.md @@ -55,7 +55,7 @@ Archiving Azure AD audit logs requires you to have Azure Monitor in an Azure sub ## View events for an access package -To view events for an access package, you must have access to the underlying Azure monitor workspace (see [Manage access to log data and workspaces in Azure Monitor](../../azure-monitor/logs/manage-access.md#manage-access-using-azure-permissions) for information) and in one of the following roles: +To view events for an access package, you must have access to the underlying Azure monitor workspace (see [Manage access to log data and workspaces in Azure Monitor](../../azure-monitor/logs/manage-access.md#azure-rbac) for information) and in one of the following roles: - Global administrator - Security administrator diff --git a/articles/active-directory/manage-apps/datawiza-with-azure-ad.md b/articles/active-directory/manage-apps/datawiza-with-azure-ad.md index 4f36ea3a01f3..718404ef1aa4 100644 --- a/articles/active-directory/manage-apps/datawiza-with-azure-ad.md +++ b/articles/active-directory/manage-apps/datawiza-with-azure-ad.md @@ -1,7 +1,7 @@ --- title: Secure hybrid access with Datawiza titleSuffix: Azure AD -description: In this tutorial, learn how to integrate Datawiza with Azure AD for secure hybrid access +description: Learn how to integrate Datawiza with Azure AD. See how to use Datawiza and Azure AD to authenticate users and give them access to on-premises and cloud apps. services: active-directory author: gargi-sinha manager: martinco @@ -9,81 +9,87 @@ ms.service: active-directory ms.subservice: app-mgmt ms.topic: how-to ms.workload: identity -ms.date: 8/27/2021 +ms.date: 05/19/2022 ms.author: gasinh ms.collection: M365-identity-device-management +ms.custom: kr2b-contr-experiment --- # Tutorial: Configure Datawiza with Azure Active Directory for secure hybrid access In this sample tutorial, learn how to integrate Azure Active Directory (Azure AD) with [Datawiza](https://www.datawiza.com/) for secure hybrid access. -Datawiza's [Datawiza Access Broker -(DAB)](https://www.datawiza.com/access-broker) extends Azure AD to enable Single Sign-on (SSO) and granular access controls to protect on-premise and cloud-hosted applications, such as Oracle E-Business Suite, Microsoft IIS, and SAP. 
+Datawiza's [Datawiza Access Broker (DAB)](https://www.datawiza.com/access-broker) extends Azure AD to enable single sign-on (SSO) and provide granular access controls to protect on-premises and cloud-hosted applications, such as Oracle E-Business Suite, Microsoft IIS, and SAP. -Using this solution enterprises can quickly transition from legacy Web Access Managers (WAMs), such as Symantec SiteMinder, NetIQ, Oracle, and IBM to Azure AD without rewriting applications. Enterprises can also use Datawiza as a no-code or low-code solution to integrate new applications to Azure AD. This saves engineering time, reduces cost significantly and delivers the project in a secured manner. +By using this solution, enterprises can quickly transition from legacy web access managers (WAMs), such as Symantec SiteMinder, NetIQ, Oracle, and IBM, to Azure AD without rewriting applications. Enterprises can also use Datawiza as a no-code or low-code solution to integrate new applications to Azure AD. This approach saves engineering time, reduces cost significantly, and delivers the project in a secured manner. ## Prerequisites -To get started, you'll need: +To get started, you need: - An Azure subscription. If you don\'t have a subscription, you can get a [trial account](https://azure.microsoft.com/free/). - An [Azure AD tenant](../fundamentals/active-directory-access-create-new-tenant.md) that's linked to your Azure subscription. -- [Docker](https://docs.docker.com/get-docker/) and -[docker-compose](https://docs.docker.com/compose/install/) -are required to run DAB. Your applications can run on any platform, such as the virtual machine and bare metal. +- [Docker](https://docs.docker.com/get-docker/) and [docker-compose](https://docs.docker.com/compose/install/), which are required to run DAB. Your applications can run on any platform, such as a virtual machine and bare metal. -- An application that you'll transition from a legacy identity system to Azure AD. In this example, DAB is deployed on the same server where the application is. The application will run on localhost: 3001 and DAB proxies traffic to the application via localhost: 9772. The traffic to the application will reach DAB first and then be proxied to the application. +- An application that you'll transition from a legacy identity system to Azure AD. In this example, DAB is deployed on the same server as the application. The application runs on localhost: 3001, and DAB proxies traffic to the application via localhost: 9772. The traffic to the application reaches DAB first and is then proxied to the application. ## Scenario description Datawiza integration includes the following components: -- [Azure AD](../fundamentals/active-directory-whatis.md) - Microsoft's cloud-based identity and access management service, which helps users sign in and access external and internal resources. +- [Azure AD](../fundamentals/active-directory-whatis.md) - A cloud-based identity and access management service from Microsoft. Azure AD helps users sign in and access external and internal resources. -- Datawiza Access Broker (DAB) - The service user sign on and transparently passes identity to applications through HTTP headers. +- Datawiza Access Broker (DAB) - The service that users sign on to. DAB transparently passes identity information to applications through HTTP headers. -- Datawiza Cloud Management Console (DCMC) - A centralized management console that manages DAB. 
DCMC provides UI and RESTful APIs for administrators to manage the configurations of DAB and its access control policies. +- Datawiza Cloud Management Console (DCMC) - A centralized management console that manages DAB. DCMC provides UI and RESTful APIs for administrators to manage the DAB configuration and access control policies. The following architecture diagram shows the implementation. -![image shows architecture diagram](./media/datawiza-with-azure-active-directory/datawiza-architecture-diagram.png) +![Architecture diagram that shows the authentication process that gives a user access to an on-premises application.](./media/datawiza-with-azure-active-directory/datawiza-architecture-diagram.png) -|Steps| Description| +|Step| Description| |:----------|:-----------| -| 1. | The user makes a request to access the on-premises or cloud-hosted application. DAB proxies the request made by the user to the application.| -| 2. |The DAB checks the user's authentication state. If it doesn't receive a session token, or the supplied session token is invalid, then it sends the user to Azure AD for authentication.| +| 1. | The user makes a request to access the on-premises or cloud-hosted application. DAB proxies the request made by the user to the application.| +| 2. | DAB checks the user's authentication state. If it doesn't receive a session token, or the supplied session token is invalid, it sends the user to Azure AD for authentication.| | 3. | Azure AD sends the user request to the endpoint specified during the DAB application's registration in the Azure AD tenant.| -| 4. | The DAB evaluates access policies and calculates attribute values to be included in HTTP headers forwarded to the application. During this step, the DAB may call out to the Identity provider to retrieve the information needed to set the header values correctly. The DAB sets the header values and sends the request to the application. | -| 5. | The user is now authenticated and has access to the application.| +| 4. | DAB evaluates access policies and calculates attribute values to be included in HTTP headers forwarded to the application. During this step, DAB may call out to the identity provider to retrieve the information needed to set the header values correctly. DAB sets the header values and sends the request to the application. | +| 5. | The user is authenticated and has access to the application.| ## Onboard with Datawiza -To integrate your on-premises or cloud-hosted application with Azure AD, login to [Datawiza Cloud Management +To integrate your on-premises or cloud-hosted application with Azure AD, sign in to [Datawiza Cloud Management Console](https://console.datawiza.com/) (DCMC). ## Create an application on DCMC -[Create an application](https://docs.datawiza.com/step-by-step/step2.html) and generate a key pair of `PROVISIONING_KEY` and `PROVISIONING_SECRET` for the application on the DCMC. +In the next step, you create an application on DCMC and generate a key pair for the app. The key pair consists of a `PROVISIONING_KEY` and `PROVISIONING_SECRET`. To create the app and generate the key pair, follow the instructions in [Datawiza Cloud Management Console](https://docs.datawiza.com/step-by-step/step2.html). -For Azure AD, Datawiza offers a convenient [One click integration](https://docs.datawiza.com/tutorial/web-app-azure-one-click.html). This method to integrate Azure AD with DCMC can create an application registration on your behalf in your Azure AD tenant. 
+For Azure AD, Datawiza offers a convenient [one-click integration](https://docs.datawiza.com/tutorial/web-app-azure-one-click.html). This method to integrate Azure AD with DCMC can create an application registration on your behalf in your Azure AD tenant. -![image shows configure idp](./media/datawiza-with-azure-active-directory/configure-idp.png) +![Screenshot of the Datawiza Configure I D P page. Boxes for name, protocol, and other values are visible. An automatic generator option is turned on.](./media/datawiza-with-azure-active-directory/configure-idp.png) -Instead, if you want to use an existing web application in your Azure AD tenant, you can disable the option and populate the fields of the form. You'll need the tenant ID, client ID, and client secret. [Create a web application and get these values in your tenant](https://docs.datawiza.com/idp/azure.html). +Instead, if you want to use an existing web application in your Azure AD tenant, you can disable the option and populate the fields of the form. You need the tenant ID, client ID, and client secret. For more information about creating a web application and getting these values, see [Microsoft Azure AD in the Datawiza documentation](https://docs.datawiza.com/idp/azure.html). -![image shows configure idp using form](./media/datawiza-with-azure-active-directory/use-form.png) +![Screenshot of the Datawiza Configure I D P page. Boxes for name, protocol, and other values are visible. An automatic generator option is turned off.](./media/datawiza-with-azure-active-directory/use-form.png) ## Run DAB with a header-based application -1. You can use either Docker or Kubernetes to run DAB. The docker image is needed for users to create a sample header-based application. [Configure DAB and SSO -integration](https://docs.datawiza.com/step-by-step/step3.html). [Deploy DAB with Kubernetes](https://docs.datawiza.com/tutorial/web-app-AKS.html). A sample docker image `docker-compose.yml` file is provided for you to download and use. [Log in to the container registry](https://docs.datawiza.com/step-by-step/step3.html#important-step) to download the images of DAB and the header-based application. +You can use either Docker or Kubernetes to run DAB. The docker image is needed to create a sample header-based application. - ```yaml - services: +To run DAB with a header-based application, follow these steps: + +1. Use either Docker or Kubernetes to run DAB: + + - For Docker-specific instructions, see [Deploy Datawiza Access Broker With Your App](https://docs.datawiza.com/step-by-step/step3.html). + - For Kubernetes-specific instructions, see [Deploy Datawiza Access Broker with a Web App using Kubernetes](https://docs.datawiza.com/tutorial/web-app-AKS.html). + + You can use the following sample docker image docker-compose.yml file: + + ```yaml + services: datawiza-access-broker: image: registry.gitlab.com/datawiza/access-broker container_name: datawiza-access-broker @@ -97,34 +103,37 @@ integration](https://docs.datawiza.com/step-by-step/step3.html). [Deploy DAB wit header-based-app: image: registry.gitlab.com/datawiza/header-based-app restart: always - ports: - - "3001:3001" + ports: + - "3001:3001" ``` -2. After executing `docker-compose -f docker-compose.yml up`, the -header-based application should have SSO enabled with Azure AD. Open a browser and type in `http://localhost:9772/`. +1. 
To sign in to the container registry and download the images of DAB and the header-based application, follow the instructions in [Important Step](https://docs.datawiza.com/step-by-step/step3.html#important-step). -3. An Azure AD login page will show up. +1. Run the following command: -## Pass user attributes to the header-based application + `docker-compose -f docker-compose.yml up` -1. DAB gets user attributes from IdP and can pass the user attributes to the application via header or cookie. See the instructions on how to [pass user attributes](https://docs.datawiza.com/step-by-step/step4.html) such as email address, firstname, and lastname to the header-based application. + The header-based application should now have SSO enabled with Azure AD. -2. After successfully configuring the user attributes, you should see the green check sign for each of the user attributes. +1. In a browser, go to `http://localhost:9772/`. An Azure AD sign-in page appears. - ![image shows datawiza application home page](./media/datawiza-with-azure-active-directory/datawiza-application-home-page.png) +## Pass user attributes to the header-based application -## Test the flow +DAB gets user attributes from Azure AD and can pass these attributes to the application via a header or cookie. + +To pass user attributes such as an email address, a first name, and a last name to the header-based application, follow the instructions in [Pass User Attributes](https://docs.datawiza.com/step-by-step/step4.html). -1. Navigate to the application URL. +After successfully configuring the user attributes, you should see a green check mark next to each attribute. -2. The DAB should redirect to the Azure AD login page. +![Screenshot that shows the Datawiza application home page. Green check marks are visible next to the host, email, firstname, and lastname attributes.](./media/datawiza-with-azure-active-directory/datawiza-application-home-page.png) + +## Test the flow -3. After successfully authenticating, you should be redirected to DAB. +1. Go to the application URL. DAB should redirect you to the Azure AD sign-in page. -4. The DAB evaluates policies, calculates headers, and sends the user to the upstream application. +1. After successfully authenticating, you should be redirected to DAB. -5. Your requested application should show up. +DAB evaluates policies, calculates headers, and sends you to the upstream application. Your requested application should appear. ## Next steps diff --git a/articles/active-directory/manage-apps/migrate-okta-federation-to-azure-active-directory.md b/articles/active-directory/manage-apps/migrate-okta-federation-to-azure-active-directory.md index 04c00fa4e0ed..9cbd7fd01502 100644 --- a/articles/active-directory/manage-apps/migrate-okta-federation-to-azure-active-directory.md +++ b/articles/active-directory/manage-apps/migrate-okta-federation-to-azure-active-directory.md @@ -1,17 +1,17 @@ --- -title: Tutorial to migrate Okta federation to Azure Active Directory-managed authentication +title: Migrate Okta federation to Azure Active Directory titleSuffix: Active Directory -description: Learn how to migrate your Okta federated applications to Azure AD-managed authentication. +description: Learn how to migrate your Okta-federated applications to managed authentication under Azure AD. See how to migrate federation in a staged manner. 
services: active-directory author: gargi-sinha manager: martinco - ms.service: active-directory ms.workload: identity ms.topic: how-to -ms.date: 09/01/2021 +ms.date: 05/19/2022 ms.author: gasinh ms.subservice: app-mgmt +ms.custom: kr2b-contr-experiment --- # Tutorial: Migrate Okta federation to Azure Active Directory-managed authentication @@ -39,7 +39,7 @@ Seamless SSO can be deployed to password hash synchronization or pass-through au Follow the [deployment guide](../hybrid/how-to-connect-sso-quick-start.md#step-1-check-the-prerequisites) to ensure that you deploy all necessary prerequisites of seamless SSO to your users. -For our example, we'll configure password hash synchronization and seamless SSO. +For this example, you configure password hash synchronization and seamless SSO. ### Configure Azure AD Connect for password hash synchronization and seamless SSO @@ -47,15 +47,15 @@ Follow these steps to configure Azure AD Connect for password hash synchronizati 1. On your Azure AD Connect server, open the **Azure AD Connect** app and then select **Configure**. - ![Screenshot that shows the Azure A D icon and Configure button.](media/migrate-okta-federation-to-azure-active-directory/configure-azure-ad.png) + ![Screenshot that shows the Azure A D icon and the Configure button in the Azure A D Connect app.](media/migrate-okta-federation-to-azure-active-directory/configure-azure-ad.png) -1. Select **Change user sign-in** > **Next**. +1. Select **Change user sign-in**, and then select **Next**. - ![Screenshot that shows the page for changing user sign-in.](media/migrate-okta-federation-to-azure-active-directory/change-user-signin.png) + ![Screenshot of the Azure A D Connect app that shows the page for changing user sign-in.](media/migrate-okta-federation-to-azure-active-directory/change-user-signin.png) 1. Enter your global administrator credentials. - ![Screenshot that shows where to enter global admin credentials.](media/migrate-okta-federation-to-azure-active-directory/global-admin-credentials.png) + ![Screenshot of the Azure A D Connect app that shows where to enter global admin credentials.](media/migrate-okta-federation-to-azure-active-directory/global-admin-credentials.png) 1. Currently, the server is configured for federation with Okta. Change the selection to **Password Hash Synchronization**. Then select **Enable single sign-on**. @@ -65,15 +65,15 @@ Follow these steps to enable seamless SSO: 1. Enter the domain administrator credentials for the local on-premises system. Then select **Next**. - ![Screenshot that shows settings for user sign-in.](media/migrate-okta-federation-to-azure-active-directory/domain-admin-credentials.png) + ![Screenshot of the Azure A D Connect app that shows settings for user sign-in.](media/migrate-okta-federation-to-azure-active-directory/domain-admin-credentials.png) 1. On the final page, select **Configure** to update the Azure AD Connect server. - ![Screenshot that shows the configuration page.](media/migrate-okta-federation-to-azure-active-directory/update-azure-ad-connect-server.png) + ![Screenshot of the Ready to configure page of the Azure A D Connect app.](media/migrate-okta-federation-to-azure-active-directory/update-azure-ad-connect-server.png) 1. Ignore the warning for hybrid Azure AD join for now. You'll reconfigure the device options after you disable federation from Okta. 
- ![Screenshot that shows the link to configure device options.](media/migrate-okta-federation-to-azure-active-directory/reconfigure-device-options.png) + ![Screenshot of the Azure A D Connect app. A warning about the hybrid Azure A D join is visible. A link for configuring device options is also visible.](media/migrate-okta-federation-to-azure-active-directory/reconfigure-device-options.png) ## Configure staged rollout features @@ -83,7 +83,7 @@ After you enable password hash sync and seamless SSO on the Azure AD Connect ser 1. In the [Azure portal](https://portal.azure.com/#home), select **View** or **Manage Azure Active Directory**. - ![Screenshot that shows the Azure portal.](media/migrate-okta-federation-to-azure-active-directory/azure-portal.png) + ![Screenshot that shows the Azure portal. A welcome message is visible.](media/migrate-okta-federation-to-azure-active-directory/azure-portal.png) 1. On the **Azure Active Directory** menu, select **Azure AD Connect**. Then confirm that **Password Hash Sync** is enabled in the tenant. @@ -93,21 +93,21 @@ After you enable password hash sync and seamless SSO on the Azure AD Connect ser 1. Your **Password Hash Sync** setting might have changed to **On** after the server was configured. If the setting isn't enabled, enable it now. - Notice that **Seamless single sign-on** is set to **Off**. If you attempt to enable it, you'll get an error because it's already enabled for users in the tenant. + Notice that **Seamless single sign-on** is set to **Off**. If you attempt to enable it, you get an error because it's already enabled for users in the tenant. 1. Select **Manage groups**. - ![Screenshot that shows the button for managing groups.](media/migrate-okta-federation-to-azure-active-directory/password-hash-sync.png) + ![Screenshot of the Enable staged rollout features page in the Azure portal. A Manage groups button is visible.](media/migrate-okta-federation-to-azure-active-directory/password-hash-sync.png) -Follow the instructions to add a group to the password hash sync rollout. In the following example, the security group starts with 10 members. +1. Follow the instructions to add a group to the password hash sync rollout. In the following example, the security group starts with 10 members. -![Screenshot that shows an example of a security group.](media/migrate-okta-federation-to-azure-active-directory/example-security-group.png) + ![Screenshot of the Manage groups for Password Hash Sync page in the Azure portal. A group is visible in a table.](media/migrate-okta-federation-to-azure-active-directory/example-security-group.png) -After you add the group, wait for about 30 minutes while the feature takes effect in your tenant. When the feature has taken effect, your users will no longer be redirected to Okta when they attempt to access Office 365 services. +1. After you add the group, wait for about 30 minutes while the feature takes effect in your tenant. When the feature has taken effect, your users are no longer redirected to Okta when they attempt to access Office 365 services. The staged rollout feature has some unsupported scenarios: -- Legacy authentication such as POP3 and SMTP aren't supported. +- Legacy authentication protocols such as POP3 and SMTP aren't supported. - If you've configured hybrid Azure AD join for use with Okta, all the hybrid Azure AD join flows go to Okta until the domain is defederated. A sign-on policy should remain in Okta to allow legacy authentication for hybrid Azure AD join Windows clients. 
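Before you move on to the Okta app registration, it can help to confirm exactly who is in the security group that you added to the staged rollout, because only those members switch to managed authentication. The following PowerShell is a minimal sketch rather than part of the official procedure: it assumes the AzureAD PowerShell module is installed, and the group name **Managed Authentication Staging Group** is only an example that you should replace with your own pilot group.

```powershell
# Connect with the AzureAD PowerShell module (Install-Module AzureAD if needed).
Connect-AzureAD

# Find the pilot security group by display name.
# "Managed Authentication Staging Group" is an example name; substitute your own.
$pilotGroup = Get-AzureADGroup -SearchString "Managed Authentication Staging Group"

# List the members that the staged rollout will move to managed authentication.
Get-AzureADGroupMember -ObjectId $pilotGroup.ObjectId -All $true |
    Select-Object DisplayName, UserPrincipalName
```

If the membership doesn't match your pilot plan, adjust the group before waiting out the 30-minute rollout window described earlier.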
## Create an Okta app in Azure AD @@ -120,30 +120,30 @@ To configure the enterprise application registration for Okta: 1. On the left menu, under **Manage**, select **Enterprise applications**. - ![Screenshot that shows the "Enterprise applications" selection.](media/migrate-okta-federation-to-azure-active-directory/enterprise-application.png) + ![Screenshot that shows the left menu of the Azure portal. Enterprise applications is visible.](media/migrate-okta-federation-to-azure-active-directory/enterprise-application.png) 1. On the **All applications** menu, select **New application**. - ![Screenshot that shows the "New application" selection.](media/migrate-okta-federation-to-azure-active-directory/new-application.png) + ![Screenshot that shows the All applications page in the Azure portal. A new application is visible.](media/migrate-okta-federation-to-azure-active-directory/new-application.png) 1. Select **Create your own application**. On the menu that opens, name the Okta app and select **Register an application you're working on to integrate with Azure AD**. Then select **Create**. - :::image type="content" source="media/migrate-okta-federation-to-azure-active-directory/register-application.png" alt-text="Screenshot that shows how to register an application." lightbox="media/migrate-okta-federation-to-azure-active-directory/register-application.png"::: + :::image type="content" source="media/migrate-okta-federation-to-azure-active-directory/register-application.png" alt-text="Screenshot that shows the Create your own application menu. The app name is visible. The option to integrate with Azure A D is turned on." lightbox="media/migrate-okta-federation-to-azure-active-directory/register-application.png"::: -1. Select **Accounts in any organizational directory (Any Azure AD Directory - Multitenant)** > **Register**. +1. Select **Accounts in any organizational directory (Any Azure AD Directory - Multitenant)**, and then select **Register**. ![Screenshot that shows how to register an application and change the application account.](media/migrate-okta-federation-to-azure-active-directory/register-change-application.png) 1. On the Azure AD menu, select **App registrations**. Then open the newly created registration. - ![Screenshot that shows the new app registration.](media/migrate-okta-federation-to-azure-active-directory/app-registration.png) + ![Screenshot that shows the App registrations page in the Azure portal. The new app registration is visible.](media/migrate-okta-federation-to-azure-active-directory/app-registration.png) 1. Record your tenant ID and application ID. >[!Note] >You'll need the tenant ID and application ID to configure the identity provider in Okta. - ![Screenshot that shows the tenant ID and application ID.](media/migrate-okta-federation-to-azure-active-directory/record-ids.png) + ![Screenshot that shows the Okta Application Access page in the Azure portal. The tenant I D and application I D are called out.](media/migrate-okta-federation-to-azure-active-directory/record-ids.png) 1. On the left menu, select **Certificates & secrets**. Then select **New client secret**. Give the secret a generic name and set its expiration date. @@ -152,40 +152,40 @@ To configure the enterprise application registration for Okta: >[!NOTE] >The value and ID aren't shown later. If you fail to record this information now, you'll have to regenerate a secret. 
- ![Screenshot that shows where to record the secret's value and I D.](media/migrate-okta-federation-to-azure-active-directory/record-secrets.png) + ![Screenshot of the Certificates and secrets page. The value and I D of the secret are visible.](media/migrate-okta-federation-to-azure-active-directory/record-secrets.png) 1. On the left menu, select **API permissions**. Grant the application access to the OpenID Connect (OIDC) stack. 1. Select **Add a permission** > **Microsoft Graph** > **Delegated permissions**. - :::image type="content" source="media/migrate-okta-federation-to-azure-active-directory/delegated-permissions.png" alt-text="Screenshot that shows delegated permissions." lightbox="media/migrate-okta-federation-to-azure-active-directory/delegated-permissions.png"::: + :::image type="content" source="media/migrate-okta-federation-to-azure-active-directory/delegated-permissions.png" alt-text="Screenshot that shows the A P I permissions page of the Azure portal. A delegated permission for reading is visible." lightbox="media/migrate-okta-federation-to-azure-active-directory/delegated-permissions.png"::: 1. In the OpenID permissions section, add **email**, **openid**, and **profile**. Then select **Add permissions**. - :::image type="content" source="media/migrate-okta-federation-to-azure-active-directory/add-permissions.png" alt-text="Screenshot that shows how to add permissions." lightbox="media/migrate-okta-federation-to-azure-active-directory/add-permissions.png"::: + :::image type="content" source="media/migrate-okta-federation-to-azure-active-directory/add-permissions.png" alt-text="Screenshot that shows the A P I permissions page of the Azure portal. Permissions for email, openid, profile, and reading are visible." lightbox="media/migrate-okta-federation-to-azure-active-directory/add-permissions.png"::: 1. Select **Grant admin consent for \** and wait until the **Granted** status appears. - ![Screenshot that shows granted consent.](media/migrate-okta-federation-to-azure-active-directory/grant-consent.png) + ![Screenshot of the A P I permissions page that shows a message about granted consent.](media/migrate-okta-federation-to-azure-active-directory/grant-consent.png) 1. On the left menu, select **Branding**. For **Home page URL**, add your user's application home page. - ![Screenshot that shows how to add branding.](media/migrate-okta-federation-to-azure-active-directory/add-branding.png) + ![Screenshot of the Branding page in the Azure portal. Several input boxes are visible, including one for the home page U R L.](media/migrate-okta-federation-to-azure-active-directory/add-branding.png) 1. In the Okta administration portal, select **Security** > **Identity Providers** to add a new identity provider. Select **Add Microsoft**. - ![Screenshot that shows how to add the identity provider.](media/migrate-okta-federation-to-azure-active-directory/configure-idp.png) + ![Screenshot of the Okta administration portal. Add Microsoft is visible in the Add Identity Provider list.](media/migrate-okta-federation-to-azure-active-directory/configure-idp.png) 1. On the **Identity Provider** page, copy your application ID to the **Client ID** field. Copy the client secret to the **Client Secret** field. -1. Select **Show Advanced Settings**. By default, this configuration will tie the user principal name (UPN) in Okta to the UPN in Azure AD for reverse-federation access. +1. Select **Show Advanced Settings**. 
By default, this configuration ties the user principal name (UPN) in Okta to the UPN in Azure AD for reverse-federation access. >[!IMPORTANT] >If your UPNs in Okta and Azure AD don't match, select an attribute that's common between users. -1. Finish your selections for autoprovisioning. By default, if a user doesn't match in Okta, the system will attempt to provision the user in Azure AD. If you've migrated provisioning away from Okta, select **Redirect to Okta sign-in page**. +1. Finish your selections for autoprovisioning. By default, if no match is found for an Okta user, the system attempts to provision the user in Azure AD. If you've migrated provisioning away from Okta, select **Redirect to Okta sign-in page**. - ![Screenshot that shows the option for redirecting to the Okta sign-in page.](media/migrate-okta-federation-to-azure-active-directory/redirect-okta.png) + ![Screenshot of the General Settings page in the Okta admin portal. The option for redirecting to the Okta sign-in page is visible.](media/migrate-okta-federation-to-azure-active-directory/redirect-okta.png) Now that you've created the identity provider (IDP), you need to send users to the correct IDP. @@ -195,7 +195,7 @@ To configure the enterprise application registration for Okta: In this example, the **Division** attribute is unused on all Okta profiles, so it's a good choice for IDP routing. - ![Screenshot that shows the division attribute for I D P routing.](media/migrate-okta-federation-to-azure-active-directory/division-idp-routing.png) + ![Screenshot of the Edit Rule page in the Okta admin portal. A rule definition that involves the division attribute is visible.](media/migrate-okta-federation-to-azure-active-directory/division-idp-routing.png) 1. Now that you've added the routing rule, record the redirect URI so you can add it to the application registration. @@ -203,23 +203,23 @@ To configure the enterprise application registration for Okta: 1. On your application registration, on the left menu, select **Authentication**. Then select **Add a platform** > **Web**. - :::image type="content" source="media/migrate-okta-federation-to-azure-active-directory/add-platform.png" alt-text="Screenshot that shows how to add a web platform." lightbox="media/migrate-okta-federation-to-azure-active-directory/add-platform.png"::: + :::image type="content" source="media/migrate-okta-federation-to-azure-active-directory/add-platform.png" alt-text="Screenshot of the Authentication page in the Azure portal. Add a platform and a Configure platforms menu are visible." lightbox="media/migrate-okta-federation-to-azure-active-directory/add-platform.png"::: 1. Add the redirect URI that you recorded in the IDP in Okta. Then select **Access tokens** and **ID tokens**. - ![Screenshot that shows Okta access and I D tokens.](media/migrate-okta-federation-to-azure-active-directory/access-id-tokens.png) + ![Screenshot of the Configure Web page in the Azure portal. A redirect U R I is visible. The access and I D tokens are selected.](media/migrate-okta-federation-to-azure-active-directory/access-id-tokens.png) 1. In the admin console, select **Directory** > **People**. Select your first test user to edit the profile. 1. In the profile, add **ToAzureAD** as in the following image. Then select **Save**. - ![Screenshot that shows how to edit a profile.](media/migrate-okta-federation-to-azure-active-directory/profile-editing.png) + ![Screenshot of the Okta admin portal. 
Profile settings are visible, and the Division box contains ToAzureAD.](media/migrate-okta-federation-to-azure-active-directory/profile-editing.png) -1. Try to sign in to the [Microsoft 356 portal](https://portal.office.com) as the modified user. If your user isn't a part of the managed authentication pilot, you'll notice that your action loops. To exit the loop, add the user to the managed authentication experience. +1. Try to sign in to the [Microsoft 356 portal](https://portal.office.com) as the modified user. If your user isn't part of the managed authentication pilot, your action enters a loop. To exit the loop, add the user to the managed authentication experience. ## Test Okta app access on pilot members -After you configure the Okta app in Azure AD and you configure the IDP in the Okta portal, you must assign the application to users. +After you configure the Okta app in Azure AD and you configure the IDP in the Okta portal, assign the application to users. 1. In the Azure portal, select **Azure Active Directory** > **Enterprise applications**. @@ -228,15 +228,15 @@ After you configure the Okta app in Azure AD and you configure the IDP in the Ok >[!NOTE] >You can add users and groups only from the **Enterprise applications** page. You can't add users from the **App registrations** menu. - ![Screenshot that shows how to add a group.](media/migrate-okta-federation-to-azure-active-directory/add-group.png) + ![Screenshot of the Users and groups page of the Azure portal. A group called Managed Authentication Staging Group is visible.](media/migrate-okta-federation-to-azure-active-directory/add-group.png) 1. After about 15 minutes, sign in as one of the managed authentication pilot users and go to [My Apps](https://myapplications.microsoft.com). - ![Screenshot that shows the My Apps gallery.](media/migrate-okta-federation-to-azure-active-directory/my-applications.png) + ![Screenshot that shows the My Apps gallery. An icon for Okta Application Access is visible.](media/migrate-okta-federation-to-azure-active-directory/my-applications.png) 1. Select the **Okta Application Access** tile to return the user to the Okta home page. -## Test-managed authentication on pilot members +## Test managed authentication on pilot members After you configure the Okta reverse-federation app, have your users conduct full testing on the managed authentication experience. We recommend that you set up company branding to help your users recognize the tenant they're signing in to. For more information, see [Add branding to your organization's Azure AD sign-in page](../fundamentals/customize-branding.md). diff --git a/articles/active-directory/manage-apps/migrate-okta-sync-provisioning-to-azure-active-directory.md b/articles/active-directory/manage-apps/migrate-okta-sync-provisioning-to-azure-active-directory.md index 3d123b3b1123..385a83e90af0 100644 --- a/articles/active-directory/manage-apps/migrate-okta-sync-provisioning-to-azure-active-directory.md +++ b/articles/active-directory/manage-apps/migrate-okta-sync-provisioning-to-azure-active-directory.md @@ -1,22 +1,22 @@ --- -title: Tutorial to migrate Okta sync provisioning to Azure AD Connect-based synchronization +title: Migrate Okta sync provisioning to Azure AD Connect titleSuffix: Active Directory -description: In this tutorial, you learn how to migrate your Okta sync provisioning to Azure AD Connect-based synchronization. +description: Learn how to migrate user provisioning from Okta to Azure Active Directory (Azure AD). 
See how to use Azure AD Connect server or Azure AD cloud provisioning. services: active-directory-b2c author: gargi-sinha manager: martinco - ms.service: active-directory ms.workload: identity ms.topic: how-to -ms.date: 09/01/2021 +ms.date: 05/19/2022 ms.author: gasinh ms.subservice: app-mgmt +ms.custom: kr2b-contr-experiment --- # Tutorial: Migrate Okta sync provisioning to Azure AD Connect-based synchronization -In this tutorial, you'll learn how your organization can currently migrate User provisioning from Okta to Azure Active Directory (Azure AD) and migrate either User sync or Universal sync to Azure AD Connect. This capability will enable further provisioning into Azure AD and Office 365. +In this tutorial, you'll learn how your organization can migrate user provisioning from Okta to Azure Active Directory (Azure AD) and migrate either User Sync or Universal Sync to Azure AD Connect. This capability enables further provisioning into Azure AD and Office 365. Migrating synchronization platforms isn't a small change. Each step of the process mentioned in this article should be validated against your own environment before you remove Azure AD Connect from staging mode or enable the Azure AD cloud provisioning agent. @@ -24,21 +24,21 @@ Migrating synchronization platforms isn't a small change. Each step of the proce When you switch from Okta provisioning to Azure AD, you have two choices. You can use either an Azure AD Connect server or Azure AD cloud provisioning. To understand the differences between the two, read the [comparison article from Microsoft](../cloud-sync/what-is-cloud-sync.md#comparison-between-azure-ad-connect-and-cloud-sync). -Azure AD cloud provisioning will be the most familiar migration path for Okta customers who use Universal or User sync. The cloud provisioning agents are lightweight. They can be installed on or near domain controllers like the Okta directory sync agents. Don't install them on the same server. +Azure AD cloud provisioning is the most familiar migration path for Okta customers who use Universal Sync or User Sync. The cloud provisioning agents are lightweight. You can install them on or near domain controllers like the Okta directory sync agents. Don't install them on the same server. Use an Azure AD Connect server if your organization needs to take advantage of any of the following technologies when you synchronize users: - Device synchronization: Hybrid Azure AD join or Hello for Business -- Passthrough authentication -- More than 150,000-object support +- Pass-through authentication +- Support for more than 150,000 objects - Support for writeback >[!NOTE] ->All prerequisites should be taken into consideration when you install Azure AD Connect or Azure AD cloud provisioning. To learn more before you continue with installation, see [Prerequisites for Azure AD Connect](../hybrid/how-to-connect-install-prerequisites.md). +>Take all prerequisites into consideration when you install Azure AD Connect or Azure AD cloud provisioning. To learn more before you continue with installation, see [Prerequisites for Azure AD Connect](../hybrid/how-to-connect-install-prerequisites.md). ## Confirm ImmutableID attribute synchronized by Okta -ImmutableID is the core attribute used to tie synchronized objects to their on-premises counterparts. Okta takes the Active Directory objectGUID of an on-premises object and converts it to a Base64 encoded string. Then, by default it stamps that string to the ImmutableID field in Azure AD. 
+ImmutableID is the core attribute used to tie synchronized objects to their on-premises counterparts. Okta takes the Active Directory objectGUID of an on-premises object and converts it to a Base64-encoded string. By default, it then stamps that string to the ImmutableID field in Azure AD. You can connect to Azure AD PowerShell and examine the current ImmutableID value. If you've never used the Azure AD PowerShell module, run `Install-Module AzureAD` in an administrative PowerShell session before you run the following commands: @@ -52,15 +52,15 @@ If you already have the module, you might receive a warning to update to the lat After the module is installed, import it and follow these steps to connect to the Azure AD service: -1. Enter your global administrator credentials in the modern authentication window. +1. Enter your global administrator credentials in the authentication window. - ![Screenshot that shows import-module.](./media/migrate-okta-sync-provisioning-to-azure-active-directory-connect-based-synchronization/import-module.png) + ![Screenshot of the Azure A D PowerShell window. The install-module, import-module, and connect commands are visible with their output.](./media/migrate-okta-sync-provisioning-to-azure-active-directory-connect-based-synchronization/import-module.png) -1. After you connect to the tenant, verify the settings for your ImmutableID values. The example shown uses Okta defaults of objectGUID to ImmutableID. +1. After you connect to the tenant, verify the settings for your ImmutableID values. The following example uses the Okta default approach of converting the objectGUID into the ImmutableID. - ![Screenshot that shows Okta defaults of objectGUID to ImmutableID.](./media/migrate-okta-sync-provisioning-to-azure-active-directory-connect-based-synchronization/okta-default-objectid.png) + ![Screenshot of the Azure A D PowerShell window. The Get-AzureADUser command is visible. Its output includes the UserPrincipalName and the ImmutableId.](./media/migrate-okta-sync-provisioning-to-azure-active-directory-connect-based-synchronization/okta-default-objectid.png) -1. There are several ways to manually confirm the objectGUID to Base64 conversion on-premises. For individual validation, use this example: +1. There are several ways to manually confirm the conversion from objectGUID to Base64 on-premises. To test an individual value, use these commands: ```PowerShell Get-ADUser onpremupn | fl objectguid @@ -68,27 +68,27 @@ After the module is installed, import it and follow these steps to connect to th [system.convert]::ToBase64String(([GUID]$objectGUID).ToByteArray()) ``` - ![Screenshot that shows how to manually change Okta objectGUID to ImmutableID.](./media/migrate-okta-sync-provisioning-to-azure-active-directory-connect-based-synchronization/manual-objectguid.png) + ![Screenshot of the Azure A D PowerShell window. The commands that convert an objectGUID to an ImmutableID are visible with their output.](./media/migrate-okta-sync-provisioning-to-azure-active-directory-connect-based-synchronization/manual-objectguid.png) ## Mass validation methods for objectGUID -Before you cut over to Azure AD Connect, it's critical to validate that the ImmutableID values in Azure AD are going to exactly match their on-premises values. +Before you move to Azure AD Connect, it's critical to validate that the ImmutableID values in Azure AD exactly match their on-premises values. 
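One lightweight way to do that before running the bulk exports that follow is to spot-check a single pilot user end to end. The snippet below is only a sketch: the UPN is a placeholder, it assumes the on-premises ActiveDirectory module is available where you run it, and it assumes you've already connected to Azure AD PowerShell as shown earlier.

```powershell
# Spot-check one user before running the bulk exports.
# 'jsmith@contoso.com' is a placeholder UPN; replace it with a real synchronized user.
$upn = 'jsmith@contoso.com'

# Expected ImmutableID, calculated from the on-premises objectGUID.
$onPremUser = Get-ADUser -Filter "UserPrincipalName -eq '$upn'" -Properties objectGUID
$expectedId = [System.Convert]::ToBase64String($onPremUser.ObjectGUID.ToByteArray())

# Actual ImmutableID currently stamped on the synchronized Azure AD object.
$actualId = (Get-AzureADUser -ObjectId $upn).ImmutableId

# The two values must match before you cut over to Azure AD Connect.
"Expected: $expectedId"
"Actual:   $actualId"
$expectedId -eq $actualId
```

If the comparison returns `False` for a sampled user, Okta probably isn't using the default objectGUID mapping, and the bulk comparison that follows becomes even more important.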
-The example will grab *all* on-premises Azure AD users and export a list of their objectGUID values and ImmutableID values already calculated to a CSV file. +The following command gets *all* on-premises Azure AD users and exports a list of their objectGUID values and ImmutableID values already calculated to a CSV file. -1. Run these commands in PowerShell on a domain controller on-premises: +1. Run this command in PowerShell on an on-premises domain controller: ```PowerShell - Get-ADUser -Filter * -Properties objectGUID | Select -Object + Get-ADUser -Filter * -Properties objectGUID | Select-Object UserPrincipalName, Name, objectGUID, @{Name = 'ImmutableID'; Expression = { [system.convert]::ToBase64String((GUID).tobytearray()) } } | export-csv C:\Temp\OnPremIDs.csv ``` - ![Screenshot that shows domain controller on-premises commands.](./media/migrate-okta-sync-provisioning-to-azure-active-directory-connect-based-synchronization/domain-controller.png) + ![Screenshot of a .csv file that lists sample output data. Columns include UserPrincipalName, Name, objectGUID, and ImmutableID.](./media/migrate-okta-sync-provisioning-to-azure-active-directory-connect-based-synchronization/domain-controller.png) -1. Run these commands in an Azure AD PowerShell session to gather the already synchronized values: +1. Run this command in an Azure AD PowerShell session to list the already synchronized values: ```powershell Get-AzureADUser -all $true | Where-Object {$_.dirsyncenabled -like @@ -98,9 +98,9 @@ The example will grab *all* on-premises Azure AD users and export a list of thei ImmutableID | export-csv C:\\temp\\AzureADSyncedIDS.csv ``` - ![Screenshot that shows an Azure AD PowerShell session.](./media/migrate-okta-sync-provisioning-to-azure-active-directory-connect-based-synchronization/azure-ad-powershell.png) + ![Screenshot of a .csv file that lists sample output data. Columns include UserPrincipalName, objectGUID, and ImmutableID.](./media/migrate-okta-sync-provisioning-to-azure-active-directory-connect-based-synchronization/azure-ad-powershell.png) - After you have both exports, confirm that the ImmutableID for each user matches. + After you have both exports, confirm that each user's ImmutableID values match. >[!IMPORTANT] >If your ImmutableID values in the cloud don't match objectGUID values, you've modified the defaults for Okta sync. You've likely chosen another attribute to determine ImmutableID values. Before you move on to the next section, it's critical to identify which source attribute is populating ImmutableID values. Ensure that you update the attribute Okta is syncing before you disable Okta sync. @@ -109,18 +109,24 @@ The example will grab *all* on-premises Azure AD users and export a list of thei After you've prepared your list of source and destination targets, it's time to install an Azure AD Connect server. If you've opted to use Azure AD Connect cloud provisioning, skip this section. -1. Continue with [downloading and installing Azure AD Connect](../hybrid/how-to-connect-install-custom.md) to your chosen server. +1. Download and install Azure AD Connect on your chosen server by following the instructions in [Custom installation of Azure Active Directory Connect](../hybrid/how-to-connect-install-custom.md). + +1. In the left panel, select **Identifying users**. -1. On the **Identifying users** page, under **Select how users should be identified with Azure AD**, select the **Choose a specific attribute** option. 
Then, select **mS-DS-ConsistencyGUID** if you haven't modified the Okta defaults. +1. On the **Uniquely identifying your users** page, under **Select how users should be identified with Azure AD**, select **Choose a specific attribute**. Then select **mS-DS-ConsistencyGUID** if you haven't modified the Okta defaults. >[!WARNING] - >This is the most critical step on this page. Before you select **Next**, ensure that the attribute you're selecting for a source anchor is what *currently* populates your existing Azure AD users. If you select the wrong attribute, you must uninstall and reinstall Azure AD Connect to reselect this option. + >This step is critical. Ensure that the attribute that you select for a source anchor is what *currently* populates your existing Azure AD users. If you select the wrong attribute, you need to uninstall and reinstall Azure AD Connect to reselect this option. - ![Screenshot that shows mS-DS-ConsistencyGuid.](./media/migrate-okta-sync-provisioning-to-azure-active-directory-connect-based-synchronization/consistency-guid.png) + ![Screenshot of the Azure A D Connect window. The page is titled Uniquely identifying your users, and the mS-DS-ConsistencyGuid attribute is selected.](./media/migrate-okta-sync-provisioning-to-azure-active-directory-connect-based-synchronization/consistency-guid.png) + +1. Select **Next**. -1. On the **Configure** page, make sure to select the **Enable staging mode** checkbox. Then select **Install**. +1. In the left panel, select **Configure**. - ![Screenshot that shows the Enable staging mode checkbox.](./media/migrate-okta-sync-provisioning-to-azure-active-directory-connect-based-synchronization/enable-staging-mode.png) +1. On the **Ready to configure** page, select **Enable staging mode**. Then select **Install**. + + ![Screenshot of the Azure A D Connect window. The page is titled Ready to configure, and the Enable staging mode checkbox is selected.](./media/migrate-okta-sync-provisioning-to-azure-active-directory-connect-based-synchronization/enable-staging-mode.png) 1. After the configuration is complete, select **Exit**. @@ -128,29 +134,29 @@ After you've prepared your list of source and destination targets, it's time to 1. Open **Synchronization Service** as an administrator. - ![Screenshot that shows opening Synchronization Service.](./media/migrate-okta-sync-provisioning-to-azure-active-directory-connect-based-synchronization/open-sync-service.png) + ![Screenshot that shows the Synchronization Service shortcut menus, with More and Run as administrator selected.](./media/migrate-okta-sync-provisioning-to-azure-active-directory-connect-based-synchronization/open-sync-service.png) -1. Check that **Full Synchronization** to the domain.onmicrosoft.com connector space has users displaying under the **Connectors with Flow Updates** tab. +1. Find the **Full Synchronization** to the domain.onmicrosoft.com connector space. Check that there are users under the **Connectors with Flow Updates** tab. - ![Screenshot that shows the Connectors with Flow Updates tab.](./media/migrate-okta-sync-provisioning-to-azure-active-directory-connect-based-synchronization/connector-flow-update.png) + ![Screenshot of the Synchronization Service window. The Connectors with Flow Updates tab is selected.](./media/migrate-okta-sync-provisioning-to-azure-active-directory-connect-based-synchronization/connector-flow-update.png) 1. Verify there are no deletions pending in the export. 
Select the **Connectors** tab, and then highlight the domain.onmicrosoft.com connector space. Then select **Search Connector Space**. - ![Screenshot that shows the Search Connector Space action.](./media/migrate-okta-sync-provisioning-to-azure-active-directory-connect-based-synchronization/search-connector-space.png) + ![Screenshot of the Synchronization Service window. The Search Connector Space action is selected.](./media/migrate-okta-sync-provisioning-to-azure-active-directory-connect-based-synchronization/search-connector-space.png) -1. In the **Search Connector Space** dialog, select the **Scope** dropdown and select **Pending Export**. +1. In the **Search Connector Space** dialog, under **Scope**, select **Pending Export**. - ![Screenshot that shows Pending Export.](./media/migrate-okta-sync-provisioning-to-azure-active-directory-connect-based-synchronization/pending-export.png) + ![Screenshot of the Search Connector Space dialog. In the Scope list, Pending Export is selected.](./media/migrate-okta-sync-provisioning-to-azure-active-directory-connect-based-synchronization/pending-export.png) 1. Select **Delete** and then select **Search**. If all objects have matched properly, there should be zero matching records for **Deletes**. Record any objects pending deletion and their on-premises values. - ![Screenshot that shows deleted matching records.](./media/migrate-okta-sync-provisioning-to-azure-active-directory-connect-based-synchronization/delete-matching-records.png) + ![Screenshot of the Search Connector Space dialog. In the search results, Text is highlighted that indicates that there were zero matching records.](./media/migrate-okta-sync-provisioning-to-azure-active-directory-connect-based-synchronization/delete-matching-records.png) -1. Clear **Delete**, and select **Add** and **Modify**, followed by a search. You should see update functions for all users currently being synchronized to Azure AD via Okta. Add any new objects that Okta isn't currently syncing, but that exist in the organizational unit (OU) structure that was selected during the Azure AD Connect installation. +1. Clear **Delete**, and select **Add** and **Modify**. Then select **Search**. You should see update functions for all users currently being synchronized to Azure AD via Okta. Add any new objects that Okta isn't currently syncing, but that exist in the organizational unit (OU) structure that was selected during the Azure AD Connect installation. - ![Screenshot that shows adding a new object.](./media/migrate-okta-sync-provisioning-to-azure-active-directory-connect-based-synchronization/add-new-object.png) + ![Screenshot of the Search Connector Space dialog. In the search results, seven records are visible.](./media/migrate-okta-sync-provisioning-to-azure-active-directory-connect-based-synchronization/add-new-object.png) -1. Double-clicking on updates shows what Azure AD Connect will communicate with Azure AD. +1. To see what Azure AD Connect will communicate with Azure AD, double-click an update. 1. If there are any **add** functions for a user who already exists in Azure AD, their on-premises account doesn't match their cloud account. AD Connect has determined it will create a new object and record any new adds that are unexpected. Make sure to correct the ImmutableID value in Azure AD before you exit the staging mode. @@ -158,14 +164,14 @@ After you've prepared your list of source and destination targets, it's time to Verify that your updates still include all attributes expected in Azure AD. 
If multiple attributes are being deleted, you might need to manually populate these on-premises AD values before you remove the staging mode. - ![Screenshot that shows populating on-premises add values.](./media/migrate-okta-sync-provisioning-to-azure-active-directory-connect-based-synchronization/on-premises-ad-values.png) + ![Screenshot of the Connector Space Object Properties window. The attributes for user John Smith are visible.](./media/migrate-okta-sync-provisioning-to-azure-active-directory-connect-based-synchronization/on-premises-ad-values.png) >[!NOTE] - >Before you continue to the next step, ensure all user attributes are syncing properly and show on the **Pending Export** tab as expected. If they're deleted, make sure their ImmutableID values match and the user is in one of the selected OUs for synchronization. + >Before you continue to the next step, ensure all user attributes are syncing properly and appear on the **Pending Export** tab as expected. If they're deleted, make sure their ImmutableID values match and the user is in one of the selected OUs for synchronization. ## Install Azure AD cloud sync agents -After you've prepared your list of source and destination targets, it's time to [install and configure Azure AD cloud sync agents](../cloud-sync/tutorial-single-forest.md). If you've opted to use an Azure AD Connect server, skip this section. +After you've prepared your list of source and destination targets, install and configure Azure AD cloud sync agents by following the instructions in [Tutorial: Integrate a single forest with a single Azure AD tenant](../cloud-sync/tutorial-single-forest.md). If you've opted to use an Azure AD Connect server, skip this section. ## Disable Okta provisioning to Azure AD @@ -173,14 +179,14 @@ After you've verified the Azure AD Connect installation and your pending exports 1. Go to your Okta portal, select **Applications**, and then select your Okta app used to provision users to Azure AD. Open the **Provisioning** tab and select the **Integration** section. - ![Screenshot that shows the Integration section in Okta.](./media/migrate-okta-sync-provisioning-to-azure-active-directory-connect-based-synchronization/integration-section.png) + ![Screenshot that shows the Integration section in the Okta portal.](./media/migrate-okta-sync-provisioning-to-azure-active-directory-connect-based-synchronization/integration-section.png) -1. Select **Edit**, clear the **Enable API integration** option and select **Save**. +1. Select **Edit**, clear the **Enable API integration** option, and select **Save**. - ![Screenshot that shows editing the Enable API integration in Okta.](./media/migrate-okta-sync-provisioning-to-azure-active-directory-connect-based-synchronization/edit-api-integration.png) + ![Screenshot that shows the Integration section in the Okta portal. A message on the page says provisioning is not enabled.](./media/migrate-okta-sync-provisioning-to-azure-active-directory-connect-based-synchronization/edit-api-integration.png) >[!NOTE] - >If you have multiple Office 365 apps handling provisioning to Azure AD, ensure they're all switched off. + >If you have multiple Office 365 apps that handle provisioning to Azure AD, ensure they're all switched off. ## Disable staging mode in Azure AD Connect @@ -188,41 +194,43 @@ After you disable Okta provisioning, the Azure AD Connect server is ready to beg 1. Run the installation wizard from the desktop again and select **Configure**. 
- ![Screenshot that shows the Azure AD Connect server.](./media/migrate-okta-sync-provisioning-to-azure-active-directory-connect-based-synchronization/azure-ad-connect-server.png) + ![Screenshot of the Azure A D Connect window. The welcome page is visible with a Configure button at the bottom.](./media/migrate-okta-sync-provisioning-to-azure-active-directory-connect-based-synchronization/azure-ad-connect-server.png) 1. Select **Configure staging mode** and then select **Next**. Enter your global administrator credentials. - ![Screenshot that shows the Configure staging mode option.](./media/migrate-okta-sync-provisioning-to-azure-active-directory-connect-based-synchronization/configure-staging-mode.png) + ![Screenshot of the Azure A D Connect window. On the left, Tasks is selected. On the Additional tasks page, Configure staging mode is selected.](./media/migrate-okta-sync-provisioning-to-azure-active-directory-connect-based-synchronization/configure-staging-mode.png) -1. Clear the **Enable staging mode** option and select **Next**. +1. Clear **Enable staging mode** and select **Next**. - ![Screenshot that shows clearing the Enable staging mode option.](./media/migrate-okta-sync-provisioning-to-azure-active-directory-connect-based-synchronization/uncheck-enable-staging-mode.png) + ![Screenshot of the Azure A D Connect window. On the left, Staging Mode is selected. On the Configure staging mode page, nothing is selected.](./media/migrate-okta-sync-provisioning-to-azure-active-directory-connect-based-synchronization/uncheck-enable-staging-mode.png) 1. Select **Configure** to continue. - ![Screenshot that shows selecting the Configure button.](./media/migrate-okta-sync-provisioning-to-azure-active-directory-connect-based-synchronization/ready-to-configure.png) + ![Screenshot of the Ready to configure page in Azure A D Connect. On the left, Configure is selected. A Configure button is also visible.](./media/migrate-okta-sync-provisioning-to-azure-active-directory-connect-based-synchronization/ready-to-configure.png) -1. After the configuration completes, open the **Synchronization Service** as an administrator. View the **Export** on the domain.onmicrosoft.com connector. Verify that all additions, updates, and deletions are done as expected. +1. After the configuration finishes, open the **Synchronization Service** as an administrator. View the **Export** on the domain.onmicrosoft.com connector. Verify that all additions, updates, and deletions are done as expected. - ![Screenshot that shows verifying the sync service.](./media/migrate-okta-sync-provisioning-to-azure-active-directory-connect-based-synchronization/verify-sync-service.png) + ![Screenshot of the Synchronization Service window. An export line is selected, and export statistics like the number of adds, updates, and deletes are visible.](./media/migrate-okta-sync-provisioning-to-azure-active-directory-connect-based-synchronization/verify-sync-service.png) -You've now successfully migrated to Azure AD Connect server-based provisioning. Updates and expansions to the feature set of Azure AD Connect can be done by rerunning the installation wizard. +You've now successfully migrated to Azure AD Connect server-based provisioning. You can update and expand the feature set of Azure AD Connect by rerunning the installation wizard. ## Enable cloud sync agents -After you disable Okta provisioning, the Azure AD cloud sync agent is ready to begin synchronizing objects. Return to the [Azure AD portal](https://aad.portal.azure.com/). 
+After you disable Okta provisioning, the Azure AD cloud sync agent is ready to begin synchronizing objects. + +1. Go to the [Azure AD portal](https://aad.portal.azure.com/). -1. Modify the **Configuration** profile to **Enabled**. +1. In the **Configuration** profile, select **Enable**. 1. Return to the provisioning menu and select **Logs**. -1. Evaluate that the provisioning connector has properly updated in-place objects. The cloud sync agents are nondestructive. They'll fail their updates if a match didn't occur properly. +1. Check that the provisioning connector has properly updated in-place objects. The cloud sync agents are nondestructive. Their updates fail if a match isn't found. 1. If a user is mismatched, make the necessary updates to bind the ImmutableID values. Then restart the cloud provisioning sync. ## Next steps -For more information about migrating from Okta to Azure AD, see: +For more information about migrating from Okta to Azure AD, see these resources: - [Migrate applications from Okta to Azure AD](migrate-applications-from-okta-to-azure-active-directory.md) - [Migrate Okta federation to Azure AD managed authentication](migrate-okta-federation-to-azure-active-directory.md) diff --git a/articles/active-directory/managed-identities-azure-resources/TOC.yml b/articles/active-directory/managed-identities-azure-resources/TOC.yml index 2866395dd710..8e9cece892b3 100644 --- a/articles/active-directory/managed-identities-azure-resources/TOC.yml +++ b/articles/active-directory/managed-identities-azure-resources/TOC.yml @@ -106,6 +106,8 @@ href: howto-assign-access-cli.md - name: PowerShell href: howto-assign-access-powershell.md + - name: Using Azure Policy + href: how-to-assign-managed-identity-via-azure-policy.md - name: Manage user-assigned managed identities href: how-manage-user-assigned-managed-identities.md diff --git a/articles/active-directory/managed-identities-azure-resources/how-to-assign-managed-identity-via-azure-policy.md b/articles/active-directory/managed-identities-azure-resources/how-to-assign-managed-identity-via-azure-policy.md new file mode 100644 index 000000000000..b652ddf3cf11 --- /dev/null +++ b/articles/active-directory/managed-identities-azure-resources/how-to-assign-managed-identity-via-azure-policy.md @@ -0,0 +1,109 @@ +--- +title: Use Azure Policy to assign managed identities (preview) +description: Documentation for the Azure Policy that can be used to assign managed identities to Azure resources. +services: active-directory +author: karavar +manager: skwan +editor: barclayn +ms.service: active-directory +ms.subservice: msi +ms.topic: how-to +ms.workload: identity +ms.date: 05/23/2022 +ms.author: vakarand +ms.collection: M365-identity-device-management +--- + +# [Preview] Use Azure Policy to assign managed identities + + +[Azure Policy](../../governance/policy/overview.md) helps enforce organizational standards and assess compliance at scale. Through its compliance dashboard, Azure policy provides an aggregated view that helps administrators evaluate the overall state of the environment. You have the ability to drill down to the per-resource, per-policy granularity. It also helps bring your resources to compliance through bulk remediation for existing resources and automatic remediation for new resources. 
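+The compliance view and bulk remediation described above can also be driven from PowerShell. The following is an illustrative sketch only (it isn't part of the official procedure), assuming the Az.PolicyInsights module; the assignment resource ID and names are placeholders.
+
+```powershell
+# Summarize policy compliance for the current subscription.
+Get-AzPolicyStateSummary
+
+# Start a bulk remediation task for existing non-compliant resources covered by a
+# specific policy assignment (placeholder resource ID).
+$assignmentId = '/subscriptions/<subscription-id>/providers/Microsoft.Authorization/policyAssignments/<assignment-name>'
+Start-AzPolicyRemediation -Name 'remediate-existing-resources' -PolicyAssignmentId $assignmentId
+```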
Common use cases for Azure Policy include implementing governance for:
+
+- Resource consistency
+- Regulatory compliance
+- Security
+- Cost
+- Management
+
+
+Policy definitions for these common use cases are already available in your Azure environment to help you get started.
+
+Azure Monitoring Agents require a [managed identity](overview.md) on the monitored Azure Virtual Machines (VMs). This document describes the behavior of a built-in Azure Policy provided by Microsoft that helps ensure a managed identity, needed for these scenarios, is assigned to VMs at scale.
+
+While using a system-assigned managed identity is possible, at scale (for example, for all VMs in a subscription) it results in a substantial number of identities being created (and deleted) in Azure AD (Azure Active Directory). To avoid this churn of identities, we recommend using user-assigned managed identities, which can be created once and shared across multiple VMs.
+
+> [!NOTE]
+> We recommend using a user-assigned managed identity per Azure subscription per Azure region.
+
+The policy is designed to implement this recommendation.
+
+## Policy definition and details
+
+- [Policy for Virtual Machines](https://portal.azure.com/#blade/Microsoft_Azure_Policy/PolicyDetailBlade/definitionId/%2Fproviders%2FMicrosoft.Authorization%2FpolicyDefinitions%2Fd367bd60-64ca-4364-98ea-276775bddd94)
+- [Policy for Virtual Machine Scale Sets](https://portal.azure.com/#blade/Microsoft_Azure_Policy/PolicyDetailBlade/definitionId/%2Fproviders%2FMicrosoft.Authorization%2FpolicyDefinitions%2F516187d4-ef64-4a1b-ad6b-a7348502976c)
+
+
+
+When executed, the policy takes the following actions:
+
+1. Creates, if one doesn't already exist, a built-in user-assigned managed identity in the subscription and in each Azure region that contains VMs in scope of the policy.
+2. Places a lock on the user-assigned managed identity so that it isn't accidentally deleted.
+3. Assigns the built-in user-assigned managed identity to the virtual machines in the subscription and region that are in scope of the policy.
+> [!NOTE]
+> If the virtual machine already has exactly one user-assigned managed identity assigned, the policy skips that VM and doesn't assign the built-in identity. This safeguard ensures that the policy assignment doesn't break applications that take a dependency on [the default behavior of the token endpoint on IMDS](managed-identities-faq.md#what-identity-will-imds-default-to-if-dont-specify-the-identity-in-the-request).
+
+
+There are two ways to use the policy:
+
+- Let the policy create and use a “built-in” user-assigned managed identity.
+- Bring your own user-assigned managed identity.
+
+The policy takes the following input parameters:
+
+- Bring-Your-Own-UAMI? - Should the policy create, if one doesn't exist, a new user-assigned managed identity?
+- If set to true, then you must specify:
+  - Name of the managed identity.
+  - Resource group in which the managed identity should be created.
+- If set to false, then no additional input is needed.
+  - The policy creates the required user-assigned managed identity, called “built-in-identity”, in a resource group called “built-in-identity-rg”.
+
+## Using the policy
+### Creating the policy assignment
+
+The policy definition can be assigned to different scopes in Azure: a management group, a subscription, or a specific resource group. A minimal sketch of creating the assignment follows.
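+The following is a minimal, hedged sketch of creating the assignment at subscription scope with the Az PowerShell modules; it isn't the article's prescribed procedure. The policy definition GUID comes from the Virtual Machines policy linked above, the identity parameters require a recent Az.Resources version, and the assignment name, scope, parameter name, and identity resource ID are placeholders.
+
+```powershell
+# Placeholder scope and a pre-created user-assigned identity that acts as the
+# assignment's identity (the PolicyAssignmentMI example described below).
+$scope      = '/subscriptions/<subscription-id>'
+$identityId = '/subscriptions/<subscription-id>/resourceGroups/<identity-rg>/providers/Microsoft.ManagedIdentity/userAssignedIdentities/PolicyAssignmentMI'
+
+# The built-in definition for virtual machines, referenced by the GUID from the link above.
+$definition = Get-AzPolicyDefinition -Id '/providers/Microsoft.Authorization/policyDefinitions/d367bd60-64ca-4364-98ea-276775bddd94'
+
+# The parameter name below mirrors the inputs described above and may not match the
+# built-in definition's actual parameter names; check the definition before assigning.
+New-AzPolicyAssignment -Name 'assign-built-in-identity-to-vms' `
+    -Scope $scope `
+    -PolicyDefinition $definition `
+    -PolicyParameterObject @{ bringYourOwnUserAssignedManagedIdentity = $false } `
+    -Location 'eastus' `
+    -IdentityType 'UserAssigned' `
+    -IdentityId $identityId
+```
+
+Assigning at a management group or resource group scope works the same way; only the value passed to `-Scope` changes.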
As policies need to be enforced all the time, the assignment operation is performed using a managed identity associated with the policy assignment object. The policy assignment object supports both system-assigned and user-assigned managed identities.
+For example, Joe can create a user-assigned managed identity called PolicyAssignmentMI and use it as the identity of the policy assignment. The built-in policy creates a user-assigned managed identity in each subscription and in each region with resources that are in scope of the policy assignment. The user-assigned managed identities created by the policy have the following resourceId format:
+
+> /subscriptions/your-subscription-id/resourceGroups/built-in-identity-rg/providers/Microsoft.ManagedIdentity/userAssignedIdentities/built-in-identity-{location}
+
+For example:
+> /subscriptions/aaaabbbb-aaaa-bbbb-1111-111122223333/resourceGroups/built-in-identity-rg/providers/Microsoft.ManagedIdentity/userAssignedIdentities/built-in-identity-eastus
+
+### Required authorization
+
+For the PolicyAssignmentMI managed identity to be able to assign the built-in policy across the specified scope, it needs the following permissions, expressed as Azure RBAC (Azure role-based access control) role assignments:
+
+| Principal | Role / Action | Scope | Purpose |
+|----|----|----------------|----|
+|PolicyAssignmentMI |Managed Identity Operator | /subscriptions/subscription-id/resourceGroups/built-in-identity-rg
OR
Bring-your-own-User-assigned-Managed identity |Required to assign the built-in identity to VMs.|
+|PolicyAssignmentMI |Contributor | /subscriptions/subscription-id |Required to create the resource group that holds the built-in managed identity in the subscription. |
+|PolicyAssignmentMI |Managed Identity Contributor | /subscriptions/subscription-id/resourceGroups/built-in-identity-rg |Required to create a new user-assigned managed identity.|
+|PolicyAssignmentMI |User Access Administrator | /subscriptions/subscription-id/resourceGroups/built-in-identity-rg
OR
Bring-your-own-User-assigned-Managed identity |Required to set a lock on the user-assigned managed identity created by the policy.|
+
+
+Because the policy assignment object must have these permissions ahead of time, PolicyAssignmentMI can't be a system-assigned managed identity for this scenario. The user who performs the policy assignment task must pre-authorize PolicyAssignmentMI with the above role assignments.
+
+As you can see, the resulting least-privileged role required is Contributor at the subscription scope.
+
+
+
+## Known issues
+
+A race condition with another deployment that changes the identities assigned to a VM can produce unexpected results.
+
+If two or more parallel deployments update the same virtual machine and they all change its identity configuration, then under specific race conditions it's possible that not all of the expected identities will be assigned to the machine.
+For example, if the policy in this document is updating the managed identities of a VM and, at the same time, another process is also making changes to the managed identities section, it isn't guaranteed that all the expected identities are properly assigned to the VM.
+
+
+## Next steps
+
+- [Deploy Azure Monitoring Agent](../../azure-monitor/overview.md)
\ No newline at end of file
diff --git a/articles/active-directory/privileged-identity-management/pim-resource-roles-configure-alerts.md b/articles/active-directory/privileged-identity-management/pim-resource-roles-configure-alerts.md
index d6cb49d9eede..14e3be588b5b 100644
--- a/articles/active-directory/privileged-identity-management/pim-resource-roles-configure-alerts.md
+++ b/articles/active-directory/privileged-identity-management/pim-resource-roles-configure-alerts.md
@@ -10,7 +10,7 @@ ms.topic: how-to
 ms.tgt_pltfrm: na
 ms.workload: identity
 ms.subservice: pim
-ms.date: 10/07/2021
+ms.date: 05/24/2022
 ms.author: curtand
 ms.reviewer: shaunliu
 ms.custom: pim
@@ -31,11 +31,15 @@ Select an alert to see a report that lists the users or roles that triggered the
 ## Alerts
 
-| Alert | Severity | Trigger | Recommendation |
-| --- | --- | --- | --- |
-| **Too many owners assigned to a resource** |Medium |Too many users have the owner role. |Review the users in the list and reassign some to less privileged roles. |
-| **Too many permanent owners assigned to a resource** |Medium |Too many users are permanently assigned to a role. |Review the users in the list and re-assign some to require activation for role use. |
-| **Duplicate role created** |Medium |Multiple roles have the same criteria. |Use only one of these roles. |
+Alert | Severity | Trigger | Recommendation
+--- | --- | --- | ---
+**Too many owners assigned to a resource** |Medium |Too many users have the owner role. |Review the users in the list and reassign some to less privileged roles.
+**Too many permanent owners assigned to a resource** |Medium |Too many users are permanently assigned to a role. |Review the users in the list and re-assign some to require activation for role use.
+**Duplicate role created** |Medium |Multiple roles have the same criteria. |Use only one of these roles.
+**Roles are being assigned outside of Privileged Identity Management (Preview)** | High | A role is managed directly through the Azure IAM resource blade or the Azure Resource Manager API. | Review the users in the list and remove them from privileged roles assigned outside of Privileged Identity Management.
+ +> [!Note] +> During the public preview of the **Roles are being assigned outside of Privileged Identity Management (Preview)** alert, Microsoft supports only permissions that are assigned at the subscription level. ### Severity diff --git a/articles/active-directory/reports-monitoring/howto-analyze-activity-logs-log-analytics.md b/articles/active-directory/reports-monitoring/howto-analyze-activity-logs-log-analytics.md index 9f1a68243576..b8ae5a2299fd 100644 --- a/articles/active-directory/reports-monitoring/howto-analyze-activity-logs-log-analytics.md +++ b/articles/active-directory/reports-monitoring/howto-analyze-activity-logs-log-analytics.md @@ -32,9 +32,9 @@ In this article, you learn how to analyze the Azure AD activity logs in your Log To follow along, you need: -* A Log Analytics workspace in your Azure subscription. Learn how to [create a Log Analytics workspace](../../azure-monitor/logs/quick-create-workspace.md). +* A [Log Analytics workspace](../../azure-monitor/logs/log-analytics-workspace-overview.md) in your Azure subscription. Learn how to [create a Log Analytics workspace](../../azure-monitor/logs/quick-create-workspace.md). * First, complete the steps to [route the Azure AD activity logs to your Log Analytics workspace](howto-integrate-activity-logs-with-log-analytics.md). -* [Access](../../azure-monitor/logs/manage-access.md#manage-access-using-workspace-permissions) to the log analytics workspace +* [Access](../../azure-monitor/logs/manage-access.md#azure-rbac) to the log analytics workspace * The following roles in Azure Active Directory (if you are accessing Log Analytics through Azure Active Directory portal) - Security Admin - Security Reader diff --git a/articles/active-directory/reports-monitoring/howto-use-azure-monitor-workbooks.md b/articles/active-directory/reports-monitoring/howto-use-azure-monitor-workbooks.md index 3f5af0fc8742..77996e1393ea 100644 --- a/articles/active-directory/reports-monitoring/howto-use-azure-monitor-workbooks.md +++ b/articles/active-directory/reports-monitoring/howto-use-azure-monitor-workbooks.md @@ -63,7 +63,7 @@ To use Monitor workbooks, you need: - A [Log Analytics workspace](../../azure-monitor/logs/quick-create-workspace.md). 
-- [Access](../../azure-monitor/logs/manage-access.md#manage-access-using-workspace-permissions) to the log analytics workspace +- [Access](../../azure-monitor/logs/manage-access.md#azure-rbac) to the log analytics workspace - Following roles in Azure Active Directory (if you are accessing Log Analytics through Azure Active Directory portal) - Security administrator - Security reader @@ -72,7 +72,7 @@ To use Monitor workbooks, you need: ## Roles -To access workbooks in Azure Active Directory, you must have access to the underlying [Log Analytics](../../azure-monitor/logs/manage-access.md#manage-access-using-azure-permissions) workspace and be assigned to one of the following roles: +To access workbooks in Azure Active Directory, you must have access to the underlying [Log Analytics workspace](../../azure-monitor/logs/manage-access.md#azure-rbac) and be assigned to one of the following roles: - Global Reader diff --git a/articles/active-directory/reports-monitoring/reference-basic-info-sign-in-logs.md b/articles/active-directory/reports-monitoring/reference-basic-info-sign-in-logs.md index ccf4ede20ef6..1f9ca0537d77 100644 --- a/articles/active-directory/reports-monitoring/reference-basic-info-sign-in-logs.md +++ b/articles/active-directory/reports-monitoring/reference-basic-info-sign-in-logs.md @@ -94,7 +94,7 @@ This attribute describes the type of cross-tenant access used by the actor to ac - `b2bDirectConnect` - A cross tenant sign-in performed by a B2B. - `microsoftSupport`- A cross tenant sign-in performed by a Microsoft support agent in a Microsoft customer tenant. - `serviceProvider` - A cross-tenant sign-in performed by a Cloud Service Provider (CSP) or similar admin on behalf of that CSP's customer in a tenant -- `unknownFutureValue` - A sentinel value used by MS Graph to help clients handle changes in enum lists. For more information, see [Best practices for working with Microsoft Graph](https://docs.microsoft.com/graph/best-practices-concept). +- `unknownFutureValue` - A sentinel value used by MS Graph to help clients handle changes in enum lists. For more information, see [Best practices for working with Microsoft Graph](/graph/best-practices-concept). If the sign-in did not the pass the boundaries of a tenant, the value is `none`. 
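+Once the sign-in logs are routed to a Log Analytics workspace, this attribute can be used to slice sign-in activity. The following is an illustrative sketch only, assuming the Az.OperationalInsights module and that the attribute is projected into the `SigninLogs` table as a `CrossTenantAccessType` column; verify the column name against your workspace schema, and treat the workspace ID as a placeholder.
+
+```powershell
+# Count sign-ins from the last 7 days by cross-tenant access type (column name assumed).
+$workspaceId = '<log-analytics-workspace-guid>'
+$query = @'
+SigninLogs
+| where TimeGenerated > ago(7d)
+| summarize SignIns = count() by tostring(CrossTenantAccessType)
+'@
+(Invoke-AzOperationalInsightsQuery -WorkspaceId $workspaceId -Query $query).Results
+```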
@@ -135,4 +135,4 @@ This value shows whether continuous access evaluation (CAE) was applied to the s ## Next steps * [Sign-in logs in Azure Active Directory](concept-sign-ins.md) -* [What is the sign-in diagnostic in Azure AD?](overview-sign-in-diagnostics.md) +* [What is the sign-in diagnostic in Azure AD?](overview-sign-in-diagnostics.md) \ No newline at end of file diff --git a/articles/active-directory/roles/TOC.yml b/articles/active-directory/roles/TOC.yml index 0e6f67b70db3..37d428b5216a 100644 --- a/articles/active-directory/roles/TOC.yml +++ b/articles/active-directory/roles/TOC.yml @@ -58,7 +58,7 @@ href: groups-pim-eligible.md - name: Assign roles with scope using PowerShell href: custom-assign-powershell.md - - name: Assign roles using Graph API + - name: Assign roles using Microsoft Graph href: custom-assign-graph.md - name: Remove role assignments items: diff --git a/articles/active-directory/roles/groups-create-eligible.md b/articles/active-directory/roles/groups-create-eligible.md index 6bf2b3a04d0b..665883dcad46 100644 --- a/articles/active-directory/roles/groups-create-eligible.md +++ b/articles/active-directory/roles/groups-create-eligible.md @@ -102,7 +102,7 @@ Add-AzureADGroupMember -ObjectId $roleAssignablegroup.Id -RefObjectId $member.Ob ### Create a role-assignable group in Azure AD ```http -POST https://graph.microsoft.com/beta/groups +POST https://graph.microsoft.com/v1.0/groups { "description": "This group is assigned to Helpdesk Administrator built-in role of Azure AD.", "displayName": "Contoso_Helpdesk_Administrators", diff --git a/articles/active-directory/roles/manage-roles-portal.md b/articles/active-directory/roles/manage-roles-portal.md index 6c14ab435ed5..de4cf2176fdb 100644 --- a/articles/active-directory/roles/manage-roles-portal.md +++ b/articles/active-directory/roles/manage-roles-portal.md @@ -158,16 +158,11 @@ If PIM is enabled, you have additional capabilities, such as making a user eligi ## Microsoft Graph API -Follow these instructions to assign a role using the Microsoft Graph API in [Graph Explorer](https://aka.ms/ge). +Follow these instructions to assign a role using the Microsoft Graph API. ### Assign a role -In this example, a security principal with objectID `f8ca5a85-489a-49a0-b555-0a6d81e56f0d` is assigned the Billing Administrator role (role definition ID `b0f54661-2d74-4c50-afa3-1ec803f12efe`) at tenant scope. If you want to see the list of immutable role template IDs of all built-in roles, see [Azure AD built-in roles](permissions-reference.md). - -1. Sign in to the [Graph Explorer](https://aka.ms/ge). -2. Select **POST** as the HTTP method from the dropdown. -3. Select the API version to **v1.0**. -4. Use the [Create unifiedRoleAssignment](/graph/api/rbacapplication-post-roleassignments) API to assign roles. Add following details to the URL and Request Body and select **Run query**. +In this example, a security principal with objectID `f8ca5a85-489a-49a0-b555-0a6d81e56f0d` is assigned the Billing Administrator role (role definition ID `b0f54661-2d74-4c50-afa3-1ec803f12efe`) at tenant scope. To see the list of immutable role template IDs of all built-in roles, see [Azure AD built-in roles](permissions-reference.md). 
```http POST https://graph.microsoft.com/v1.0/roleManagement/directory/roleAssignments @@ -183,19 +178,16 @@ Content-type: application/json ### Assign a role using PIM -In this example, a security principal with objectID `f8ca5a85-489a-49a0-b555-0a6d81e56f0d` is assigned a time-bound eligible role assignment to Billing Administrator (role definition ID `b0f54661-2d74-4c50-afa3-1ec803f12efe`) for 180 days. +#### Assign a time-bound eligible role assignment -1. Sign in to the [Graph Explorer](https://aka.ms/ge). -2. Select **POST** as the HTTP method from the dropdown. -3. Select the API version to **beta**. -4. Use the [Create unifiedRoleEligibilityScheduleRequest](/graph/api/unifiedroleeligibilityschedulerequest-post-unifiedroleeligibilityschedulerequests) API to assign roles using PIM. Add following details to the URL and Request Body and select **Run query**. +In this example, a security principal with objectID `f8ca5a85-489a-49a0-b555-0a6d81e56f0d` is assigned a time-bound eligible role assignment to Billing Administrator (role definition ID `b0f54661-2d74-4c50-afa3-1ec803f12efe`) for 180 days. ```http -POST https://graph.microsoft.com/beta/rolemanagement/directory/roleEligibilityScheduleRequests +POST https://graph.microsoft.com/v1.0/rolemanagement/directory/roleEligibilityScheduleRequests Content-type: application/json { - "action": "AdminAssign", + "action": "adminAssign", "justification": "for managing admin tasks", "directoryScopeId": "/", "principalId": "f8ca5a85-489a-49a0-b555-0a6d81e56f0d", @@ -203,21 +195,23 @@ Content-type: application/json "scheduleInfo": { "startDateTime": "2021-07-15T19:15:08.941Z", "expiration": { - "type": "AfterDuration", + "type": "afterDuration", "duration": "PT180D" } } } ``` +#### Assign a permanent eligible role assignment + In the following example, a security principal is assigned a permanent eligible role assignment to Billing Administrator. ```http -POST https://graph.microsoft.com/beta/rolemanagement/directory/roleEligibilityScheduleRequests +POST https://graph.microsoft.com/v1.0/rolemanagement/directory/roleEligibilityScheduleRequests Content-type: application/json { - "action": "AdminAssign", + "action": "adminAssign", "justification": "for managing admin tasks", "directoryScopeId": "/", "principalId": "f8ca5a85-489a-49a0-b555-0a6d81e56f0d", @@ -225,20 +219,22 @@ Content-type: application/json "scheduleInfo": { "startDateTime": "2021-07-15T19:15:08.941Z", "expiration": { - "type": "NoExpiration" + "type": "noExpiration" } } } ``` -To activate the role assignment, use the [Create unifiedRoleAssignmentScheduleRequest](/graph/api/unifiedroleassignmentschedulerequest-post-unifiedroleassignmentschedulerequests) API. +#### Activate a role assignment + +To activate the role assignment, use the [Create roleAssignmentScheduleRequests](/graph/api/rbacapplication-post-roleeligibilityschedulerequests) API. 
```http -POST https://graph.microsoft.com/beta/roleManagement/directory/roleAssignmentScheduleRequests +POST https://graph.microsoft.com/v1.0/roleManagement/directory/roleAssignmentScheduleRequests Content-type: application/json { - "action": "SelfActivate", + "action": "selfActivate", "justification": "activating role assignment for admin privileges", "roleDefinitionId": "b0f54661-2d74-4c50-afa3-1ec803f12efe", "directoryScopeId": "/", @@ -246,6 +242,8 @@ Content-type: application/json } ``` +For more information about managing Azure AD roles through the PIM API in Microsoft Graph, see [Overview of role management through the privileged identity management (PIM) API](/graph/api/resources/privilegedidentitymanagementv3-overview). + ## Next steps - [List Azure AD role assignments](view-assignments.md) diff --git a/articles/active-directory/saas-apps/blinq-provisioning-tutorial.md b/articles/active-directory/saas-apps/blinq-provisioning-tutorial.md new file mode 100644 index 000000000000..526baf280858 --- /dev/null +++ b/articles/active-directory/saas-apps/blinq-provisioning-tutorial.md @@ -0,0 +1,182 @@ +--- +title: 'Tutorial: Configure Blinq for automatic user provisioning with Azure Active Directory | Microsoft Docs' +description: Learn how to automatically provision and de-provision user accounts from Azure AD to Blinq. +services: active-directory +author: twimmers +writer: twimmers +manager: beatrizd +ms.assetid: 5b076ac0-cd0e-43c3-85ed-8591bfd424ff +ms.service: active-directory +ms.subservice: saas-app-tutorial +ms.workload: identity +ms.topic: tutorial +ms.date: 05/25/2022 +ms.author: thwimmer +--- + +# Tutorial: Configure Blinq for automatic user provisioning + +This tutorial describes the steps you need to do in both Blinq and Azure Active Directory (Azure AD) to configure automatic user provisioning. When configured, Azure AD automatically provisions and de-provisions users and groups to [Blinq](https://blinq.me/) using the Azure AD Provisioning service. For important details on what this service does, how it works, and frequently asked questions, see [Automate user provisioning and deprovisioning to SaaS applications with Azure Active Directory](../app-provisioning/user-provisioning.md). + + +## Capabilities supported +> [!div class="checklist"] +> * Create users in Blinq. +> * Remove users in Blinq when they do not require access anymore. +> * Keep user attributes synchronized between Azure AD and Blinq. + +## Prerequisites + +The scenario outlined in this tutorial assumes that you already have the following prerequisites: + +* [An Azure AD tenant](../develop/quickstart-create-new-tenant.md) +* A user account in Azure AD with [permission](../roles/permissions-reference.md) to configure provisioning (for example, Application Administrator, Cloud Application administrator, Application Owner, or Global Administrator). +* A user account in Blinq with Admin permission + +## Step 1. Plan your provisioning deployment +1. Learn about [how the provisioning service works](../app-provisioning/user-provisioning.md). +1. Determine who will be in [scope for provisioning](../app-provisioning/define-conditional-rules-for-provisioning-user-accounts.md). +1. Determine what data to [map between Azure AD and Blinq](../app-provisioning/customize-application-attributes.md). + +## Step 2. Configure Blinq to support provisioning with Azure AD + +1. Navigate to [Blinq Admin Console](https://dash.blinq.me) in a separate browser tab. +1. If you aren't logged in to Blinq you will need to do so. +1. 
Click your workspace in the top-left corner of the screen.
+1. In the dropdown, click **Settings**.
+1. On the **Integrations** page, you should see **Team Card Provisioning**, which contains a URL and a token. You will need to generate the token by clicking **Generate**.
+Copy the **URL** and **Token**. Enter them in the **Tenant URL*** and **Secret Token** fields, respectively, in the Azure portal.
+
+## Step 3. Add Blinq from the Azure AD application gallery
+
+Add Blinq from the Azure AD application gallery to start managing provisioning to Blinq. If you have previously set up Blinq for SSO, you can use the same application. However, it's recommended that you create a separate app when initially testing out the integration. Learn more about adding an application from the gallery [here](../manage-apps/add-application-portal.md).
+
+## Step 4. Define who will be in scope for provisioning
+
+The Azure AD provisioning service allows you to scope who will be provisioned based on assignment to the application and/or based on attributes of the user and group. If you choose to scope who will be provisioned to your app based on assignment, you can use the following [steps](../manage-apps/assign-user-or-group-access-portal.md) to assign users and groups to the application. If you choose to scope who will be provisioned based solely on attributes of the user or group, you can use a scoping filter as described [here](../app-provisioning/define-conditional-rules-for-provisioning-user-accounts.md).
+
+* Start small. Test with a small set of users and groups before rolling out to everyone. When scope for provisioning is set to assigned users and groups, you can control this by assigning one or two users or groups to the app. When scope is set to all users and groups, you can specify an [attribute-based scoping filter](../app-provisioning/define-conditional-rules-for-provisioning-user-accounts.md).
+
+* If you need additional roles, you can [update the application manifest](../develop/howto-add-app-roles-in-azure-ad-apps.md) to add new roles.
+
+
+## Step 5. Configure automatic user provisioning to Blinq
+
+This section guides you through the steps to configure the Azure AD provisioning service to create, update, and disable users and groups in Blinq based on user and group assignments in Azure AD.
+
+### To configure automatic user provisioning for Blinq in Azure AD:
+
+1. Sign in to the [Azure portal](https://portal.azure.com). Select **Enterprise Applications**, then select **All applications**.
+
+    ![Screenshot of Enterprise applications blade.](common/enterprise-applications.png)
+
+1. In the applications list, select **Blinq**.
+
+    ![Screenshot of the Blinq link in the Applications list.](common/all-applications.png)
+
+1. Select the **Provisioning** tab.
+
+    ![Screenshot of Provisioning tab.](common/provisioning.png)
+
+1. Set the **Provisioning Mode** to **Automatic**.
+
+    ![Screenshot of Provisioning tab automatic.](common/provisioning-automatic.png)
+
+1. In the **Admin Credentials** section, input your Blinq Tenant URL and Secret Token. Click **Test Connection** to ensure Azure AD can connect to Blinq. If the connection fails, ensure your Blinq account has Admin permissions and try again.
+
+    ![Screenshot of Token field.](common/provisioning-testconnection-tenanturltoken.png)
+
+1. 
In the **Notification Email** field, enter the email address of a person or group who should receive the provisioning error notifications and select the **Send an email notification when a failure occurs** check box. + + ![Screenshot of Notification Email.](common/provisioning-notification-email.png) + +1. Select **Save**. + +1. In the **Mappings** section, select **Synchronize Azure Active Directory Users to Blinq**. + +1. Review the user attributes that are synchronized from Azure AD to Blinq in the **Attribute-Mapping** section. The attributes selected as **Matching** properties are used to match the user accounts in Blinq for update operations. If you choose to change the [matching target attribute](../app-provisioning/customize-application-attributes.md), you'll need to ensure that the Blinq API supports filtering users based on that attribute. Select the **Save** button to commit any changes. + + |Attribute|Type|Supported for filtering|Required by Blinq| + |---|---|---|---| + |userName|String|✓|✓ + |active|Boolean|| + |displayName|String|| + |nickName|String|| + |title|String|| + |preferredLanguage|String|| + |locale|String|| + |timezone|String|| + |name.givenName|String|| + |name.familyName|String|| + |name.formatted|String|| + |name.middleName|String|| + |name.honorificPrefix|String|| + |name.honorificSuffix|String|| + |externalId|String|| + |emails[type eq "work"].value|String|| + |emails[type eq "home"].value|String|| + |emails[type eq "other"].value|String|| + |phoneNumbers[type eq "work"].value|String|| + |phoneNumbers[type eq "mobile"].value|String|| + |phoneNumbers[type eq "fax"].value|String|| + |phoneNumbers[type eq "home"].value|String|| + |phoneNumbers[type eq "other"].value|String|| + |phoneNumbers[type eq "pager"].value|String|| + |addresses[type eq "work"].formatted|String|| + |addresses[type eq "work"].streetAddress|String|| + |addresses[type eq "work"].locality|String|| + |addresses[type eq "work"].region|String|| + |addresses[type eq "work"].postalCode|String|| + |addresses[type eq "work"].country|String|| + |addresses[type eq "home"].formatted|String|| + |addresses[type eq "home"].streetAddress|String|| + |addresses[type eq "home"].locality|String|| + |addresses[type eq "home"].region|String|| + |addresses[type eq "home"].postalCode|String|| + |addresses[type eq "home"].country|String|| + |addresses[type eq "other"].formatted|String|| + |addresses[type eq "other"].streetAddress|String|| + |addresses[type eq "other"].locality|String|| + |addresses[type eq "other"].region|String|| + |addresses[type eq "other"].postalCode|String|| + |addresses[type eq "other"].country|String|| + |urn:ietf:params:scim:schemas:extension:enterprise:2.0:User:employeeNumber|String|| + |urn:ietf:params:scim:schemas:extension:enterprise:2.0:User:organization|String|| + |urn:ietf:params:scim:schemas:extension:enterprise:2.0:User:division|String|| + |urn:ietf:params:scim:schemas:extension:enterprise:2.0:User:department|String|| + + +1. To configure scoping filters, refer to the following instructions provided in the [Scoping filter tutorial](../app-provisioning/define-conditional-rules-for-provisioning-user-accounts.md). + +1. To enable the Azure AD provisioning service for Blinq, change the **Provisioning Status** to **On** in the **Settings** section. + + ![Screenshot of Provisioning Status Toggled On.](common/provisioning-toggle-on.png) + +1. Define the users and groups that you would like to provision to Blinq by choosing the desired values in **Scope** in the **Settings** section. 
+ + ![Screenshot of Provisioning Scope.](common/provisioning-scope.png) + +1. When you're ready to provision, click **Save**. + + ![Screenshot of Saving Provisioning Configuration.](common/provisioning-configuration-save.png) + +This operation starts the initial synchronization cycle of all users and groups defined in **Scope** in the **Settings** section. The initial cycle takes longer to complete than next cycles, which occur approximately every 40 minutes as long as the Azure AD provisioning service is running. + +## Step 6. Monitor your deployment +Once you've configured provisioning, use the following resources to monitor your deployment: + +* Use the [provisioning logs](../reports-monitoring/concept-provisioning-logs.md) to determine which users have been provisioned successfully or unsuccessfully +* Check the [progress bar](../app-provisioning/application-provisioning-when-will-provisioning-finish-specific-user.md) to see the status of the provisioning cycle and how close it's to completion +* If the provisioning configuration seems to be in an unhealthy state, the application will go into quarantine. Learn more about quarantine states [here](../app-provisioning/application-provisioning-quarantine-status.md). + +## Change Logs +05/25/2022 - **Schema Discovery** feature enabled on this app. + +## More resources + +* [Managing user account provisioning for Enterprise Apps](../app-provisioning/configure-automatic-user-provisioning-portal.md) +* [What is application access and single sign-on with Azure Active Directory?](../manage-apps/what-is-single-sign-on.md) + +## Next steps + +* [Learn how to review logs and get reports on provisioning activity](../app-provisioning/check-status-user-account-provisioning.md) \ No newline at end of file diff --git a/articles/active-directory/saas-apps/cerby-provisioning-tutorial.md b/articles/active-directory/saas-apps/cerby-provisioning-tutorial.md index 9263823daea0..4445778ab45d 100644 --- a/articles/active-directory/saas-apps/cerby-provisioning-tutorial.md +++ b/articles/active-directory/saas-apps/cerby-provisioning-tutorial.md @@ -138,6 +138,16 @@ Once you've configured provisioning, use the following resources to monitor your * Check the [progress bar](../app-provisioning/application-provisioning-when-will-provisioning-finish-specific-user.md) to see the status of the provisioning cycle and how close it's to completion * If the provisioning configuration seems to be in an unhealthy state, the application will go into quarantine. Learn more about quarantine states [here](../app-provisioning/application-provisioning-quarantine-status.md). +## Troubleshooting Tips +If you need to regenerate the SCIM API authentication token, complete the following steps: + +1. Send an email with your request to [Cerby Support Team](mailto:support@cerby.com). The Cerby team regenerates the SCIM API authentication token. +1. Receive the response email from Cerby to confirm that the token was successfully regenerated. +1. Complete the instructions from the [How to Retrieve the SCIM API Authentication Token from Cerby](https://help.cerby.com/en/articles/5638472-how-to-configure-automatic-user-provisioning-for-azure-ad) article to retrieve the new token. + + >[!NOTE] + >The Cerby team is currently developing a self-service solution for regenerating the SCIM API authentication token. To regenerate the token, the Cerby team members must validate their identity. 
+ ## More resources * [Managing user account provisioning for Enterprise Apps](../app-provisioning/configure-automatic-user-provisioning-portal.md) diff --git a/articles/active-directory/saas-apps/cisco-umbrella-tutorial.md b/articles/active-directory/saas-apps/cisco-umbrella-tutorial.md index afb70f435405..d934197ca71a 100644 --- a/articles/active-directory/saas-apps/cisco-umbrella-tutorial.md +++ b/articles/active-directory/saas-apps/cisco-umbrella-tutorial.md @@ -1,5 +1,5 @@ --- -title: 'Tutorial: Azure Active Directory integration with Cisco Umbrella Admin SSO | Microsoft Docs' +title: 'Tutorial: Azure AD integration with Cisco Umbrella Admin SSO' description: Learn how to configure single sign-on between Azure Active Directory and Cisco Umbrella Admin SSO. services: active-directory author: jeevansd @@ -9,10 +9,10 @@ ms.service: active-directory ms.subservice: saas-app-tutorial ms.workload: identity ms.topic: tutorial -ms.date: 03/16/2021 +ms.date: 05/24/2022 ms.author: jeedes --- -# Tutorial: Azure Active Directory integration with Cisco Umbrella Admin SSO +# Tutorial: Azure AD integration with Cisco Umbrella Admin SSO In this tutorial, you'll learn how to integrate Cisco Umbrella Admin SSO with Azure Active Directory (Azure AD). When you integrate Cisco Umbrella Admin SSO with Azure AD, you can: @@ -27,6 +27,9 @@ To get started, you need the following items: * An Azure AD subscription. If you don't have a subscription, you can get a [free account](https://azure.microsoft.com/free/). * Cisco Umbrella Admin SSO single sign-on (SSO) enabled subscription. +> [!NOTE] +> This integration is also available to use from Azure AD US Government Cloud environment. You can find this application in the Azure AD US Government Cloud Application Gallery and configure it in the same way as you do from public cloud. + ## Scenario description In this tutorial, you configure and test Azure AD single sign-on in a test environment. @@ -65,7 +68,7 @@ Follow these steps to enable Azure AD SSO in the Azure portal. 1. On the **Select a single sign-on method** page, select **SAML**. 1. On the **Set up single sign-on with SAML** page, click the pencil icon for **Basic SAML Configuration** to edit the settings. - ![Edit Basic SAML Configuration](common/edit-urls.png) + ![Screenshot shows to edit Basic S A M L Configuration.](common/edit-urls.png "Basic Configuration") 4. On the **Basic SAML Configuration** section, the user does not have to perform any step as the app is already pre-integrated with Azure. @@ -77,11 +80,11 @@ Follow these steps to enable Azure AD SSO in the Azure portal. 5. On the **Set up Single Sign-On with SAML** page, in the **SAML Signing Certificate** section, click **Download** to download the **Metadata XML** from the given options as per your requirement and save it on your computer. - ![The Certificate download link](common/metadataxml.png) + ![Screenshot shows the Certificate download link.](common/metadataxml.png "Certificate") 6. On the **Set up Cisco Umbrella Admin SSO** section, copy the appropriate URL(s) as per your requirement. - ![Copy configuration URLs](common/copy-configuration-urls.png) + ![Screenshot shows to copy configuration appropriate U R L.](common/copy-configuration-urls.png "Attributes") ### Create an Azure AD test user @@ -113,27 +116,27 @@ In this section, you'll enable B.Simon to use Azure single sign-on by granting a 2. From the left side of menu, click **Admin** and navigate to **Authentication** and then click on **SAML**. 
- ![The Admin](./media/cisco-umbrella-tutorial/admin.png) + ![Screenshot shows the Admin menu window.](./media/cisco-umbrella-tutorial/admin.png "Administrator") 3. Choose **Other** and click on **NEXT**. - ![The Other](./media/cisco-umbrella-tutorial/other.png) + ![Screenshot shows the Other menu window.](./media/cisco-umbrella-tutorial/other.png "Folder") 4. On the **Cisco Umbrella Admin SSO Metadata**, page, click **NEXT**. - ![The metadata](./media/cisco-umbrella-tutorial/metadata.png) + ![Screenshot shows the metadata file page.](./media/cisco-umbrella-tutorial/metadata.png "File") 5. On the **Upload Metadata** tab, if you had pre-configured SAML, select **Click here to change them** option and follow the below steps. - ![The Next](./media/cisco-umbrella-tutorial/next.png) + ![Screenshot shows the Next Folder window.](./media/cisco-umbrella-tutorial/next.png "Values") 6. In the **Option A: Upload XML file**, upload the **Federation Metadata XML** file that you downloaded from the Azure portal and after uploading metadata the below values get auto populated automatically then click **NEXT**. - ![The choosefile](./media/cisco-umbrella-tutorial/choose-file.png) + ![Screenshot shows the choosefile from folder.](./media/cisco-umbrella-tutorial/choose-file.png "Federation") 7. Under **Validate SAML Configuration** section, click **TEST YOUR SAML CONFIGURATION**. - ![The Test](./media/cisco-umbrella-tutorial/test.png) + ![Screenshot shows the Test SAML Configuration.](./media/cisco-umbrella-tutorial/test.png "Validate") 8. Click **SAVE**. @@ -148,11 +151,11 @@ In the case of Cisco Umbrella Admin SSO, provisioning is a manual task. 2. From the left side of menu, click **Admin** and navigate to **Accounts**. - ![The Account](./media/cisco-umbrella-tutorial/account.png) + ![Screenshot shows the Account of Cisco Umbrella Admin.](./media/cisco-umbrella-tutorial/account.png "Account") 3. On the **Accounts** page, click on **Add** on the top right side of the page and perform the following steps. - ![The User](./media/cisco-umbrella-tutorial/create-user.png) + ![Screenshot shows the User of Accounts.](./media/cisco-umbrella-tutorial/create-user.png "User") a. In the **First Name** field, enter the firstname like **Britta**. diff --git a/articles/active-directory/saas-apps/empactis-tutorial.md b/articles/active-directory/saas-apps/empactis-tutorial.md index e2dd617da21c..0e31ecbe34a8 100644 --- a/articles/active-directory/saas-apps/empactis-tutorial.md +++ b/articles/active-directory/saas-apps/empactis-tutorial.md @@ -1,5 +1,5 @@ --- -title: 'Tutorial: Azure Active Directory integration with Empactis | Microsoft Docs' +title: 'Tutorial: Azure AD SSO integration with Empactis' description: Learn how to configure single sign-on between Azure Active Directory and Empactis. services: active-directory author: jeevansd @@ -9,175 +9,116 @@ ms.service: active-directory ms.subservice: saas-app-tutorial ms.workload: identity ms.topic: tutorial -ms.date: 03/13/2019 +ms.date: 05/26/2022 ms.author: jeedes --- -# Tutorial: Azure Active Directory integration with Empactis +# Tutorial: Azure AD SSO integration with Empactis -In this tutorial, you learn how to integrate Empactis with Azure Active Directory (Azure AD). -Integrating Empactis with Azure AD provides you with the following benefits: +In this tutorial, you'll learn how to integrate Empactis with Azure Active Directory (Azure AD). When you integrate Empactis with Azure AD, you can: -* You can control in Azure AD who has access to Empactis. 
-* You can enable your users to be automatically signed-in to Empactis (Single Sign-On) with their Azure AD accounts. -* You can manage your accounts in one central location - the Azure portal. - -If you want to know more details about SaaS app integration with Azure AD, see [What is application access and single sign-on with Azure Active Directory](../manage-apps/what-is-single-sign-on.md). -If you don't have an Azure subscription, [create a free account](https://azure.microsoft.com/free/) before you begin. +* Control in Azure AD who has access to Empactis. +* Enable your users to be automatically signed-in to Empactis with their Azure AD accounts. +* Manage your accounts in one central location - the Azure portal. ## Prerequisites -To configure Azure AD integration with Empactis, you need the following items: +To get started, you need the following items: -* An Azure AD subscription. If you don't have an Azure AD environment, you can get one-month trial [here](https://azure.microsoft.com/pricing/free-trial/) -* Empactis single sign-on enabled subscription +* An Azure AD subscription. If you don't have a subscription, you can get a [free account](https://azure.microsoft.com/free/). +* Empactis single sign-on (SSO) enabled subscription. +* Along with Cloud Application Administrator, Application Administrator can also add or manage applications in Azure AD. +For more information, see [Azure built-in roles](../roles/permissions-reference.md). ## Scenario description In this tutorial, you configure and test Azure AD single sign-on in a test environment. -* Empactis supports **IDP** initiated SSO +* Empactis supports **IDP** initiated SSO. -## Adding Empactis from the gallery +## Add Empactis from the gallery To configure the integration of Empactis into Azure AD, you need to add Empactis from the gallery to your list of managed SaaS apps. -**To add Empactis from the gallery, perform the following steps:** - -1. In the **[Azure portal](https://portal.azure.com)**, on the left navigation panel, click **Azure Active Directory** icon. - - ![The Azure Active Directory button](common/select-azuread.png) - -2. Navigate to **Enterprise Applications** and then select the **All Applications** option. - - ![The Enterprise applications blade](common/enterprise-applications.png) - -3. To add new application, click **New application** button on the top of dialog. - - ![The New application button](common/add-new-app.png) - -4. In the search box, type **Empactis**, select **Empactis** from result panel then click **Add** button to add the application. - - ![Empactis in the results list](common/search-new-app.png) - -## Configure and test Azure AD single sign-on +1. Sign in to the Azure portal using either a work or school account, or a personal Microsoft account. +1. On the left navigation pane, select the **Azure Active Directory** service. +1. Navigate to **Enterprise Applications** and then select **All Applications**. +1. To add new application, select **New application**. +1. In the **Add from the gallery** section, type **Empactis** in the search box. +1. Select **Empactis** from results panel and then add the app. Wait a few seconds while the app is added to your tenant. -In this section, you configure and test Azure AD single sign-on with Empactis based on a test user called **Britta Simon**. -For single sign-on to work, a link relationship between an Azure AD user and the related user in Empactis needs to be established. 
+## Configure and test Azure AD SSO for Empactis -To configure and test Azure AD single sign-on with Empactis, you need to complete the following building blocks: +Configure and test Azure AD SSO with Empactis using a test user called **B.Simon**. For SSO to work, you need to establish a link relationship between an Azure AD user and the related user in Empactis. -1. **[Configure Azure AD Single Sign-On](#configure-azure-ad-single-sign-on)** - to enable your users to use this feature. -2. **[Configure Empactis Single Sign-On](#configure-empactis-single-sign-on)** - to configure the Single Sign-On settings on application side. -3. **[Create an Azure AD test user](#create-an-azure-ad-test-user)** - to test Azure AD single sign-on with Britta Simon. -4. **[Assign the Azure AD test user](#assign-the-azure-ad-test-user)** - to enable Britta Simon to use Azure AD single sign-on. -5. **[Create Empactis test user](#create-empactis-test-user)** - to have a counterpart of Britta Simon in Empactis that is linked to the Azure AD representation of user. -6. **[Test single sign-on](#test-single-sign-on)** - to verify whether the configuration works. +To configure and test Azure AD SSO with Empactis, perform the following steps: -### Configure Azure AD single sign-on +1. **[Configure Azure AD SSO](#configure-azure-ad-sso)** - to enable your users to use this feature. + 1. **[Create an Azure AD test user](#create-an-azure-ad-test-user)** - to test Azure AD single sign-on with B.Simon. + 1. **[Assign the Azure AD test user](#assign-the-azure-ad-test-user)** - to enable B.Simon to use Azure AD single sign-on. +1. **[Configure Empactis SSO](#configure-empactis-sso)** - to configure the single sign-on settings on application side. + 1. **[Create Empactis test user](#create-empactis-test-user)** - to have a counterpart of B.Simon in Empactis that is linked to the Azure AD representation of user. +1. **[Test SSO](#test-sso)** - to verify whether the configuration works. -In this section, you enable Azure AD single sign-on in the Azure portal. +## Configure Azure AD SSO -To configure Azure AD single sign-on with Empactis, perform the following steps: +Follow these steps to enable Azure AD SSO in the Azure portal. -1. In the [Azure portal](https://portal.azure.com/), on the **Empactis** application integration page, select **Single sign-on**. +1. In the Azure portal, on the **Empactis** application integration page, find the **Manage** section and select **Single sign-on**. +1. On the **Select a Single sign-on method** page, select **SAML**. +1. On the **Set up Single Sign-On with SAML** page, click the pencil icon for **Basic SAML Configuration** to edit the settings. - ![Configure single sign-on link](common/select-sso.png) - -2. On the **Select a Single sign-on method** dialog, select **SAML/WS-Fed** mode to enable single sign-on. - - ![Single sign-on select mode](common/select-saml-option.png) - -3. On the **Set up Single Sign-On with SAML** page, click **Edit** icon to open **Basic SAML Configuration** dialog. - - ![Edit Basic SAML Configuration](common/edit-urls.png) + ![Screenshot shows to edit Basic S A M L Configuration.](common/edit-urls.png "Basic Configuration") 4. On the **Basic SAML Configuration** section, the user does not have to perform any step as the app is already pre-integrated with Azure. - ![Empactis Domain and URLs single sign-on information](common/preintegrated.png) - 5. 
On the **Set up Single Sign-On with SAML** page, in the **SAML Signing Certificate** section, click **Download** to download the **Certificate (Base64)** from the given options as per your requirement and save it on your computer. - ![The Certificate download link](common/certificatebase64.png) + ![Screenshot shows the Certificate download link.](common/certificatebase64.png "Certificate") 6. On the **Set up Empactis** section, copy the appropriate URL(s) as per your requirement. - ![Copy configuration URLs](common/copy-configuration-urls.png) - - a. Login URL - - b. Azure AD Identifier - - c. Logout URL - -### Configure Empactis Single Sign-On - -To configure single sign-on on **Empactis** side, you need to send the downloaded **Certificate (Base64)** and appropriate copied URLs from Azure portal to [Empactis support team](mailto:support@empactis.com). They set this setting to have the SAML SSO connection set properly on both sides. + ![Screenshot shows to copy appropriate configuration U R L.](common/copy-configuration-urls.png "Configuration") ### Create an Azure AD test user -The objective of this section is to create a test user in the Azure portal called Britta Simon. - -1. In the Azure portal, in the left pane, select **Azure Active Directory**, select **Users**, and then select **All users**. - - ![The "Users and groups" and "All users" links](common/users.png) - -2. Select **New user** at the top of the screen. - - ![New user Button](common/new-user.png) +In this section, you'll create a test user in the Azure portal called B.Simon. -3. In the User properties, perform the following steps. - - ![The User dialog box](common/user-properties.png) - - a. In the **Name** field, enter **BrittaSimon**. - - b. In the **User name** field, type **brittasimon@yourcompanydomain.extension** - For example, BrittaSimon@contoso.com - - c. Select **Show password** check box, and then write down the value that's displayed in the Password box. - - d. Click **Create**. +1. From the left pane in the Azure portal, select **Azure Active Directory**, select **Users**, and then select **All users**. +1. Select **New user** at the top of the screen. +1. In the **User** properties, follow these steps: + 1. In the **Name** field, enter `B.Simon`. + 1. In the **User name** field, enter the username@companydomain.extension. For example, `B.Simon@contoso.com`. + 1. Select the **Show password** check box, and then write down the value that's displayed in the **Password** box. + 1. Click **Create**. ### Assign the Azure AD test user -In this section, you enable Britta Simon to use Azure single sign-on by granting access to Empactis. - -1. In the Azure portal, select **Enterprise Applications**, select **All applications**, then select **Empactis**. +In this section, you'll enable B.Simon to use Azure single sign-on by granting access to Empactis. - ![Enterprise applications blade](common/enterprise-applications.png) +1. In the Azure portal, select **Enterprise Applications**, and then select **All applications**. +1. In the applications list, select **Empactis**. +1. In the app's overview page, find the **Manage** section and select **Users and groups**. +1. Select **Add user**, then select **Users and groups** in the **Add Assignment** dialog. +1. In the **Users and groups** dialog, select **B.Simon** from the Users list, then click the **Select** button at the bottom of the screen. +1. 
If you're expecting any role value in the SAML assertion, in the **Select Role** dialog, select the appropriate role for the user from the list and then click the **Select** button at the bottom of the screen. +1. In the **Add Assignment** dialog, click the **Assign** button. -2. In the applications list, select **Empactis**. +## Configure Empactis SSO - ![The Empactis link in the Applications list](common/all-applications.png) - -3. In the menu on the left, select **Users and groups**. - - ![The "Users and groups" link](common/users-groups-blade.png) - -4. Click the **Add user** button, then select **Users and groups** in the **Add Assignment** dialog. - - ![The Add Assignment pane](common/add-assign-user.png) - -5. In the **Users and groups** dialog select **Britta Simon** in the Users list, then click the **Select** button at the bottom of the screen. - -6. If you are expecting any role value in the SAML assertion then in the **Select Role** dialog, select the appropriate role for the user from the list, then click the **Select** button at the bottom of the screen. - -7. In the **Add Assignment** dialog, click the **Assign** button. +To configure single sign-on on **Empactis** side, you need to send the downloaded **Certificate (Base64)** and appropriate copied URLs from Azure portal to [Empactis support team](mailto:support@empactis.com). They set this setting to have the SAML SSO connection set properly on both sides. ### Create Empactis test user In this section, you create a user called Britta Simon in Empactis. Work with [Empactis support team](mailto:support@empactis.com) to add the users in the Empactis platform. Users must be created and activated before you use single sign-on. -### Test single sign-on - -In this section, you test your Azure AD single sign-on configuration using the Access Panel. +## Test SSO -When you click the Empactis tile in the Access Panel, you should be automatically signed in to the Empactis for which you set up SSO. For more information about the Access Panel, see [Introduction to the Access Panel](https://support.microsoft.com/account-billing/sign-in-and-start-apps-from-the-my-apps-portal-2f3b1bae-0e5a-4a86-a33e-876fbd2a4510). +In this section, you test your Azure AD single sign-on configuration with following options. -## Additional Resources +* Click on Test this application in Azure portal and you should be automatically signed in to the Empactis for which you set up the SSO. -- [List of Tutorials on How to Integrate SaaS Apps with Azure Active Directory](./tutorial-list.md) +* You can use Microsoft My Apps. When you click the Empactis tile in the My Apps, you should be automatically signed in to the Empactis for which you set up the SSO. For more information about the My Apps, see [Introduction to the My Apps](../user-help/my-apps-portal-end-user-access.md). -- [What is application access and single sign-on with Azure Active Directory?](../manage-apps/what-is-single-sign-on.md) +## Next steps -- [What is Conditional Access in Azure Active Directory?](../conditional-access/overview.md) \ No newline at end of file +Once you configure Empactis you can enforce session control, which protects exfiltration and infiltration of your organization’s sensitive data in real time. Session control extends from Conditional Access. [Learn how to enforce session control with Microsoft Cloud App Security](/cloud-app-security/proxy-deployment-aad). 
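
As an alternative to the portal steps above for creating the **B.Simon** test account, the same user can be created with a single Microsoft Graph call. The following is a minimal sketch, not part of the official tutorial flow: it assumes you already hold a Graph access token carrying the `User.ReadWrite.All` permission, and the `contoso.com` domain, the password value, and the token variable are placeholders you must replace.

```python
import requests

GRAPH = "https://graph.microsoft.com/v1.0"
token = "<access-token-with-User.ReadWrite.All>"  # placeholder - acquire via MSAL or Azure CLI

# Properties mirror the portal steps: display name, UPN, and an initial password.
new_user = {
    "accountEnabled": True,
    "displayName": "B.Simon",
    "mailNickname": "B.Simon",
    "userPrincipalName": "B.Simon@contoso.com",   # replace with your tenant domain
    "passwordProfile": {
        "forceChangePasswordNextSignIn": True,
        "password": "<initial-password>",          # note this value, as in the portal flow
    },
}

resp = requests.post(
    f"{GRAPH}/users",
    headers={"Authorization": f"Bearer {token}", "Content-Type": "application/json"},
    json=new_user,
    timeout=30,
)
resp.raise_for_status()
print("Created user with object id:", resp.json()["id"])
```

The object ID returned by Graph identifies the same user you grant access to in the **Assign the Azure AD test user** step.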
\ No newline at end of file diff --git a/articles/active-directory/saas-apps/flexera-one-tutorial.md b/articles/active-directory/saas-apps/flexera-one-tutorial.md index 589f4ce1b270..797d7f33f627 100644 --- a/articles/active-directory/saas-apps/flexera-one-tutorial.md +++ b/articles/active-directory/saas-apps/flexera-one-tutorial.md @@ -9,7 +9,7 @@ ms.service: active-directory ms.subservice: saas-app-tutorial ms.workload: identity ms.topic: tutorial -ms.date: 12/29/2021 +ms.date: 05/24/2022 ms.author: jeedes --- @@ -29,6 +29,9 @@ To get started, you need the following items: * An Azure AD subscription. If you don't have a subscription, you can get a [free account](https://azure.microsoft.com/free/). * Flexera One single sign-on (SSO) enabled subscription. +> [!NOTE] +> This integration is also available to use from Azure AD US Government Cloud environment. You can find this application in the Azure AD US Government Cloud Application Gallery and configure it in the same way as you do from public cloud. + ## Scenario description In this tutorial, you configure and test Azure AD SSO in a test environment. @@ -68,7 +71,7 @@ Follow these steps to enable Azure AD SSO in the Azure portal. 1. On the **Select a single sign-on method** page, select **SAML**. 1. On the **Set up single sign-on with SAML** page, click the pencil icon for **Basic SAML Configuration** to edit the settings. - ![Edit Basic SAML Configuration](common/edit-urls.png) + ![Screenshot shows to edit Basic S A M L Configuration.](common/edit-urls.png "Basic Configuration") 1. On the **Basic SAML Configuration** section, perform the following steps: @@ -86,7 +89,7 @@ Follow these steps to enable Azure AD SSO in the Azure portal. 1. Flexera One application expects the SAML assertions in a specific format, which requires you to add custom attribute mappings to your SAML token attributes configuration. The following screenshot shows the list of default attributes. - ![image](common/default-attributes.png) + ![Screenshot shows the image of Flexera One application.](common/default-attributes.png "Attributes") 1. In addition to above, Flexera One application expects few more attributes to be passed back in SAML response which are shown below. These attributes are also pre populated but you can review them as per your requirements. @@ -97,11 +100,11 @@ Follow these steps to enable Azure AD SSO in the Azure portal. 1. On the **Set up single sign-on with SAML** page, in the **SAML Signing Certificate** section, find **Certificate (Base64)** and select **Download** to download the certificate and save it on your computer. - ![The Certificate download link](common/certificatebase64.png) + ![Screenshot shows the Certificate download link.](common/certificatebase64.png "Certificate") 1. On the **Set up Flexera One** section, copy the appropriate URL(s) based on your requirement. 
- ![Copy configuration URLs](common/copy-configuration-urls.png) + ![Screenshot shows to copy Configuration appropriate U R L.](common/copy-configuration-urls.png "Configuration") ### Create an Azure AD test user diff --git a/articles/active-directory/saas-apps/iauditor-tutorial.md b/articles/active-directory/saas-apps/iauditor-tutorial.md index 05dae794f984..85c724738f2d 100644 --- a/articles/active-directory/saas-apps/iauditor-tutorial.md +++ b/articles/active-directory/saas-apps/iauditor-tutorial.md @@ -9,7 +9,7 @@ ms.service: active-directory ms.subservice: saas-app-tutorial ms.workload: identity ms.topic: tutorial -ms.date: 03/24/2022 +ms.date: 05/24/2022 ms.author: jeedes --- @@ -31,6 +31,9 @@ To get started, you need the following items: * Along with Cloud Application Administrator, Application Administrator can also add or manage applications in Azure AD. For more information, see [Azure built-in roles](../roles/permissions-reference.md). +> [!NOTE] +> This integration is also available to use from Azure AD US Government Cloud environment. You can find this application in the Azure AD US Government Cloud Application Gallery and configure it in the same way as you do from public cloud. + ## Scenario description In this tutorial, you configure and test Azure AD SSO in a test environment. @@ -69,7 +72,7 @@ Follow these steps to enable Azure AD SSO in the Azure portal. 1. On the **Select a single sign-on method** page, select **SAML**. 1. On the **Set up single sign-on with SAML** page, click the pencil icon for **Basic SAML Configuration** to edit the settings. - ![Edit Basic SAML Configuration](common/edit-urls.png) + ![Screenshot shows to edit Basic S A M L Configuration.](common/edit-urls.png "Basic Configuration") 1. On the **Basic SAML Configuration** section, if you wish to configure the application in **IDP** initiated mode, perform the following steps: @@ -96,7 +99,7 @@ Follow these steps to enable Azure AD SSO in the Azure portal. 1. iAuditor application expects the SAML assertions in a specific format, which requires you to add custom attribute mappings to your SAML token attributes configuration. The following screenshot shows the list of default attributes. - ![image](common/default-attributes.png) + ![Screenshot shows the image of iAuditor application.](common/default-attributes.png "Attributes") 1. In addition to above, iAuditor application expects few more attributes to be passed back in SAML response which are shown below. These attributes are also pre-populated but you can review them as per your requirements. @@ -108,7 +111,7 @@ Follow these steps to enable Azure AD SSO in the Azure portal. 1. On the **Set up single sign-on with SAML** page, in the **SAML Signing Certificate** section, find **Certificate (PEM)** and select **Download** to download the certificate and save it on your computer. 
- ![The Certificate download link](common/certificate-base64-download.png) + ![Screenshot shows the Certificate download link.](common/certificate-base64-download.png "Certificate") ### Create an Azure AD test user diff --git a/articles/active-directory/saas-apps/iwellnessnow-tutorial.md b/articles/active-directory/saas-apps/iwellnessnow-tutorial.md index d01b709b63e3..cda1ff050167 100644 --- a/articles/active-directory/saas-apps/iwellnessnow-tutorial.md +++ b/articles/active-directory/saas-apps/iwellnessnow-tutorial.md @@ -1,5 +1,5 @@ --- -title: 'Tutorial: Azure Active Directory integration with iWellnessNow | Microsoft Docs' +title: 'Tutorial: Azure AD SSO integration with iWellnessNow' description: Learn how to configure single sign-on between Azure Active Directory and iWellnessNow. services: active-directory author: jeevansd @@ -9,11 +9,11 @@ ms.service: active-directory ms.subservice: saas-app-tutorial ms.workload: identity ms.topic: tutorial -ms.date: 08/07/2019 +ms.date: 05/26/2022 ms.author: jeedes --- -# Tutorial: Integrate iWellnessNow with Azure Active Directory +# Tutorial: Azure AD SSO integration with iWellnessNow In this tutorial, you'll learn how to integrate iWellnessNow with Azure Active Directory (Azure AD). When you integrate iWellnessNow with Azure AD, you can: @@ -21,101 +21,93 @@ In this tutorial, you'll learn how to integrate iWellnessNow with Azure Active D * Enable your users to be automatically signed-in to iWellnessNow with their Azure AD accounts. * Manage your accounts in one central location - the Azure portal. -To learn more about SaaS app integration with Azure AD, see [What is application access and single sign-on with Azure Active Directory](../manage-apps/what-is-single-sign-on.md). - ## Prerequisites To get started, you need the following items: * An Azure AD subscription. If you don't have a subscription, you can get a [free account](https://azure.microsoft.com/free/). * iWellnessNow single sign-on (SSO) enabled subscription. +* Along with Cloud Application Administrator, Application Administrator can also add or manage applications in Azure AD. +For more information, see [Azure built-in roles](../roles/permissions-reference.md). ## Scenario description In this tutorial, you configure and test Azure AD SSO in a test environment. -* iWellnessNow supports **SP and IDP** initiated SSO +* iWellnessNow supports **SP and IDP** initiated SSO. -## Adding iWellnessNow from the gallery +## Add iWellnessNow from the gallery To configure the integration of iWellnessNow into Azure AD, you need to add iWellnessNow from the gallery to your list of managed SaaS apps. -1. Sign in to the [Azure portal](https://portal.azure.com) using either a work or school account, or a personal Microsoft account. +1. Sign in to the Azure portal using either a work or school account, or a personal Microsoft account. 1. On the left navigation pane, select the **Azure Active Directory** service. 1. Navigate to **Enterprise Applications** and then select **All Applications**. 1. To add new application, select **New application**. 1. In the **Add from the gallery** section, type **iWellnessNow** in the search box. 1. Select **iWellnessNow** from results panel and then add the app. Wait a few seconds while the app is added to your tenant. -## Configure and test Azure AD single sign-on +## Configure and test Azure AD SSO for iWellnessNow Configure and test Azure AD SSO with iWellnessNow using a test user called **B.Simon**. 
For SSO to work, you need to establish a link relationship between an Azure AD user and the related user in iWellnessNow. -To configure and test Azure AD SSO with iWellnessNow, complete the following building blocks: +To configure and test Azure AD SSO with iWellnessNow, perform the following steps: 1. **[Configure Azure AD SSO](#configure-azure-ad-sso)** - to enable your users to use this feature. -2. **[Configure iWellnessNow SSO](#configure-iwellnessnow-sso)** - to configure the Single Sign-On settings on application side. -3. **[Create an Azure AD test user](#create-an-azure-ad-test-user)** - to test Azure AD single sign-on with B.Simon. -4. **[Assign the Azure AD test user](#assign-the-azure-ad-test-user)** - to enable B.Simon to use Azure AD single sign-on. -5. **[Create iWellnessNow test user](#create-iwellnessnow-test-user)** - to have a counterpart of B.Simon in iWellnessNow that is linked to the Azure AD representation of user. -6. **[Test SSO](#test-sso)** - to verify whether the configuration works. + 1. **[Create an Azure AD test user](#create-an-azure-ad-test-user)** - to test Azure AD single sign-on with B.Simon. + 1. **[Assign the Azure AD test user](#assign-the-azure-ad-test-user)** - to enable B.Simon to use Azure AD single sign-on. +1. **[Configure iWellnessNow SSO](#configure-iwellnessnow-sso)** - to configure the single sign-on settings on application side. + 1. **[Create iWellnessNow test user](#create-iwellnessnow-test-user)** - to have a counterpart of B.Simon in iWellnessNow that is linked to the Azure AD representation of user. +1. **[Test SSO](#test-sso)** - to verify whether the configuration works. -### Configure Azure AD SSO +## Configure Azure AD SSO Follow these steps to enable Azure AD SSO in the Azure portal. -1. In the [Azure portal](https://portal.azure.com/), on the **iWellnessNow** application integration page, find the **Manage** section and select **Single sign-on**. +1. In the Azure portal, on the **iWellnessNow** application integration page, find the **Manage** section and select **Single sign-on**. 1. On the **Select a Single sign-on method** page, select **SAML**. -1. On the **Set up Single Sign-On with SAML** page, click the edit/pen icon for **Basic SAML Configuration** to edit the settings. +1. On the **Set up Single Sign-On with SAML** page, click the pencil icon for **Basic SAML Configuration** to edit the settings. - ![Edit Basic SAML Configuration](common/edit-urls.png) + ![Screenshot shows to edit Basic S A M L Configuration.](common/edit-urls.png "Basic Configuration") 1. On the **Basic SAML Configuration** section, if you have **Service Provider metadata file** and wish to configure in **IDP** initiated mode, perform the following steps: a. Click **Upload metadata file**. - ![Upload metadata file](common/upload-metadata.png) + ![Screenshot shows to upload metadata file.](common/upload-metadata.png "Metadata") b. Click on **folder logo** to select the metadata file and click **Upload**. - ![choose metadata file](common/browse-upload-metadata.png) + ![Screenshot shows to choose metadata file.](common/browse-upload-metadata.png "Folder") c. After the metadata file is successfully uploaded, the **Identifier** and **Reply URL** values get auto populated in Basic SAML Configuration section. 
- ![Screenshot shows the Basic SAML Configuration, where you can enter Reply U R L, and select Save.](common/idp-intiated.png) - > [!Note] - > If the **Identifier** and **Reply URL** values do not get auto polulated, then fill in the values manually according to your requirement. + > If the **Identifier** and **Reply URL** values do not get auto populated, then fill in the values manually according to your requirement. 1. If you don't have **Service Provider metadata file** and wish to configure the application in **IDP** initiated mode, perform the following steps: - ![iWellnessNow Domain and URLs single sign-on information](common/idp-intiated.png) - - a. In the **Identifier** textbox, type a URL using the following pattern: `http://.iwellnessnow.com` + a. In the **Identifier** textbox, type a URL using the following pattern: + `http://.iwellnessnow.com` - b. In the **Reply URL** textbox, type a URL using the following pattern: `https://.iwellnessnow.com/ssologin` + b. In the **Reply URL** textbox, type a URL using the following pattern: + `https://.iwellnessnow.com/ssologin` 1. Click **Set additional URLs** and perform the following step if you wish to configure the application in **SP** initiated mode: - ![Screenshot shows Set additional U R Ls where you can enter a Sign on U R L.](common/metadata-upload-additional-signon.png) - In the **Sign-on URL** text box, type a URL using the following pattern: `https://.iwellnessnow.com/` > [!NOTE] - > These values are not real. Update these values with the actual Sign-on URL, Identifier and Reply URL. Contact [iWellnessNow Client support team](mailto:info@iwellnessnow.com) to get these values. You can also refer to the patterns shown in the **Basic SAML Configuration** section in the Azure portal. + > These values are not real. Update these values with the actual Identifier, Reply URL and Sign on URL. Contact [iWellnessNow Client support team](mailto:info@iwellnessnow.com) to get these values. You can also refer to the patterns shown in the **Basic SAML Configuration** section in the Azure portal. 1. On the **Set up Single Sign-On with SAML** page, in the **SAML Signing Certificate** section, find **Metadata XML** and select **Download** to download the certificate and save it on your computer. - ![The Certificate download link](common/metadataxml.png) + ![Screenshot shows the Certificate download link.](common/metadataxml.png "Certificate") 1. On the **Set up iWellnessNow** section, copy the appropriate URL(s) based on your requirement. - ![Copy configuration URLs](common/copy-configuration-urls.png) - -### Configure iWellnessNow SSO - -To configure single sign-on on **iWellnessNow** side, you need to send the downloaded **Federation Metadata XML** and appropriate copied URLs from Azure portal to [iWellnessNow support team](mailto:info@iwellnessnow.com). They set this setting to have the SAML SSO connection set properly on both sides. + ![Screenshot shows to copy appropriate configuration U R L.](common/copy-configuration-urls.png "Configuration") ### Create an Azure AD test user @@ -136,31 +128,35 @@ In this section, you'll enable B.Simon to use Azure single sign-on by granting a 1. In the Azure portal, select **Enterprise Applications**, and then select **All applications**. 1. In the applications list, select **iWellnessNow**. 1. In the app's overview page, find the **Manage** section and select **Users and groups**. - - ![The "Users and groups" link](common/users-groups-blade.png) - 1. 
Select **Add user**, then select **Users and groups** in the **Add Assignment** dialog. - - ![The Add User link](common/add-assign-user.png) - 1. In the **Users and groups** dialog, select **B.Simon** from the Users list, then click the **Select** button at the bottom of the screen. 1. If you're expecting any role value in the SAML assertion, in the **Select Role** dialog, select the appropriate role for the user from the list and then click the **Select** button at the bottom of the screen. 1. In the **Add Assignment** dialog, click the **Assign** button. +## Configure iWellnessNow SSO + +To configure single sign-on on **iWellnessNow** side, you need to send the downloaded **Federation Metadata XML** and appropriate copied URLs from Azure portal to [iWellnessNow support team](mailto:info@iwellnessnow.com). They set this setting to have the SAML SSO connection set properly on both sides. + ### Create iWellnessNow test user In this section, you create a user called Britta Simon in iWellnessNow. Work with [iWellnessNow support team](mailto:info@iwellnessnow.com) to add the users in the iWellnessNow platform. Users must be created and activated before you use single sign-on. -### Test SSO +## Test SSO + +In this section, you test your Azure AD single sign-on configuration with following options. + +#### SP initiated: + +* Click on **Test this application** in Azure portal. This will redirect to iWellnessNow Sign on URL where you can initiate the login flow. -In this section, you test your Azure AD single sign-on configuration using the Access Panel. +* Go to iWellnessNow Sign-on URL directly and initiate the login flow from there. -When you click the iWellnessNow tile in the Access Panel, you should be automatically signed in to the iWellnessNow for which you set up SSO. For more information about the Access Panel, see [Introduction to the Access Panel](https://support.microsoft.com/account-billing/sign-in-and-start-apps-from-the-my-apps-portal-2f3b1bae-0e5a-4a86-a33e-876fbd2a4510). +#### IDP initiated: -## Additional resources +* Click on **Test this application** in Azure portal and you should be automatically signed in to the iWellnessNow for which you set up the SSO. -- [ List of Tutorials on How to Integrate SaaS Apps with Azure Active Directory ](./tutorial-list.md) +You can also use Microsoft My Apps to test the application in any mode. When you click the iWellnessNow tile in the My Apps, if configured in SP mode you would be redirected to the application sign on page for initiating the login flow and if configured in IDP mode, you should be automatically signed in to the iWellnessNow for which you set up the SSO. For more information about the My Apps, see [Introduction to the My Apps](../user-help/my-apps-portal-end-user-access.md). -- [What is application access and single sign-on with Azure Active Directory? ](../manage-apps/what-is-single-sign-on.md) +## Next steps -- [What is conditional access in Azure Active Directory?](../conditional-access/overview.md) \ No newline at end of file +Once you configure iWellnessNow you can enforce session control, which protects exfiltration and infiltration of your organization’s sensitive data in real time. Session control extends from Conditional Access. [Learn how to enforce session control with Microsoft Cloud App Security](/cloud-app-security/proxy-deployment-aad). 
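
Related to the note earlier in this tutorial: if the **Identifier** and **Reply URL** values don't auto-populate after you upload the Service Provider metadata file, you can read them out of the file before entering them manually. This is a small illustrative sketch using only the Python standard library; it assumes the metadata file is saved locally as `sp-metadata.xml` and that its root element is a single SAML 2.0 `EntityDescriptor`.

```python
import xml.etree.ElementTree as ET

NS = {"md": "urn:oasis:names:tc:SAML:2.0:metadata"}

tree = ET.parse("sp-metadata.xml")   # the Service Provider metadata file
entity = tree.getroot()

# The Identifier (Entity ID) is the entityID attribute of the root EntityDescriptor.
print("Identifier (Entity ID):", entity.get("entityID"))

# The Reply URL is the AssertionConsumerService location, typically the HTTP-POST binding.
for acs in entity.findall(".//md:SPSSODescriptor/md:AssertionConsumerService", NS):
    if acs.get("Binding", "").endswith("HTTP-POST"):
        print("Reply URL (ACS):", acs.get("Location"), "index:", acs.get("index"))
```

The `entityID` value corresponds to the **Identifier** box and the HTTP-POST `Location` to the **Reply URL** box in the **Basic SAML Configuration** section.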
\ No newline at end of file diff --git a/articles/active-directory/saas-apps/jobbadmin-tutorial.md b/articles/active-directory/saas-apps/jobbadmin-tutorial.md index bfffb9b1b888..8e65d7e1ecc0 100644 --- a/articles/active-directory/saas-apps/jobbadmin-tutorial.md +++ b/articles/active-directory/saas-apps/jobbadmin-tutorial.md @@ -1,5 +1,5 @@ --- -title: 'Tutorial: Azure Active Directory integration with Jobbadmin | Microsoft Docs' +title: 'Tutorial: Azure AD SSO integration with Jobbadmin' description: Learn how to configure single sign-on between Azure Active Directory and Jobbadmin. services: active-directory author: jeevansd @@ -9,186 +9,129 @@ ms.service: active-directory ms.subservice: saas-app-tutorial ms.workload: identity ms.topic: tutorial -ms.date: 02/25/2019 +ms.date: 02/25/2022 ms.author: jeedes --- -# Tutorial: Azure Active Directory integration with Jobbadmin +# Tutorial: Azure AD SSO integration with Jobbadmin -In this tutorial, you learn how to integrate Jobbadmin with Azure Active Directory (Azure AD). -Integrating Jobbadmin with Azure AD provides you with the following benefits: +In this tutorial, you'll learn how to integrate Jobbadmin with Azure Active Directory (Azure AD). When you integrate Jobbadmin with Azure AD, you can: -* You can control in Azure AD who has access to Jobbadmin. -* You can enable your users to be automatically signed-in to Jobbadmin (Single Sign-On) with their Azure AD accounts. -* You can manage your accounts in one central location - the Azure portal. - -If you want to know more details about SaaS app integration with Azure AD, see [What is application access and single sign-on with Azure Active Directory](../manage-apps/what-is-single-sign-on.md). -If you don't have an Azure subscription, [create a free account](https://azure.microsoft.com/free/) before you begin. +* Control in Azure AD who has access to Jobbadmin. +* Enable your users to be automatically signed-in to Jobbadmin with their Azure AD accounts. +* Manage your accounts in one central location - the Azure portal. ## Prerequisites -To configure Azure AD integration with Jobbadmin, you need the following items: +To get started, you need the following items: -* An Azure AD subscription. If you don't have an Azure AD environment, you can get one-month trial [here](https://azure.microsoft.com/pricing/free-trial/) -* Jobbadmin single sign-on enabled subscription +* An Azure AD subscription. If you don't have a subscription, you can get a [free account](https://azure.microsoft.com/free/). +* Jobbadmin single sign-on (SSO) enabled subscription. +* Along with Cloud Application Administrator, Application Administrator can also add or manage applications in Azure AD. +For more information, see [Azure built-in roles](../roles/permissions-reference.md). ## Scenario description In this tutorial, you configure and test Azure AD single sign-on in a test environment. -* Jobbadmin supports **SP** initiated SSO +* Jobbadmin supports **SP** initiated SSO. -## Adding Jobbadmin from the gallery +## Add Jobbadmin from the gallery To configure the integration of Jobbadmin into Azure AD, you need to add Jobbadmin from the gallery to your list of managed SaaS apps. -**To add Jobbadmin from the gallery, perform the following steps:** - -1. In the **[Azure portal](https://portal.azure.com)**, on the left navigation panel, click **Azure Active Directory** icon. - - ![The Azure Active Directory button](common/select-azuread.png) - -2. 
Navigate to **Enterprise Applications** and then select the **All Applications** option. - - ![The Enterprise applications blade](common/enterprise-applications.png) - -3. To add new application, click **New application** button on the top of dialog. - - ![The New application button](common/add-new-app.png) - -4. In the search box, type **Jobbadmin**, select **Jobbadmin** from result panel then click **Add** button to add the application. - - ![Jobbadmin in the results list](common/search-new-app.png) - -## Configure and test Azure AD single sign-on - -In this section, you configure and test Azure AD single sign-on with Jobbadmin based on a test user called **Britta Simon**. -For single sign-on to work, a link relationship between an Azure AD user and the related user in Jobbadmin needs to be established. +1. Sign in to the Azure portal using either a work or school account, or a personal Microsoft account. +1. On the left navigation pane, select the **Azure Active Directory** service. +1. Navigate to **Enterprise Applications** and then select **All Applications**. +1. To add new application, select **New application**. +1. In the **Add from the gallery** section, type **Jobbadmin** in the search box. +1. Select **Jobbadmin** from results panel and then add the app. Wait a few seconds while the app is added to your tenant. -To configure and test Azure AD single sign-on with Jobbadmin, you need to complete the following building blocks: +## Configure and test Azure AD SSO for Jobbadmin -1. **[Configure Azure AD Single Sign-On](#configure-azure-ad-single-sign-on)** - to enable your users to use this feature. -2. **[Configure Jobbadmin Single Sign-On](#configure-jobbadmin-single-sign-on)** - to configure the Single Sign-On settings on application side. -3. **[Create an Azure AD test user](#create-an-azure-ad-test-user)** - to test Azure AD single sign-on with Britta Simon. -4. **[Assign the Azure AD test user](#assign-the-azure-ad-test-user)** - to enable Britta Simon to use Azure AD single sign-on. -5. **[Create Jobbadmin test user](#create-jobbadmin-test-user)** - to have a counterpart of Britta Simon in Jobbadmin that is linked to the Azure AD representation of user. -6. **[Test single sign-on](#test-single-sign-on)** - to verify whether the configuration works. +Configure and test Azure AD SSO with Jobbadmin using a test user called **B.Simon**. For SSO to work, you need to establish a link relationship between an Azure AD user and the related user in Jobbadmin. -### Configure Azure AD single sign-on +To configure and test Azure AD SSO with Jobbadmin, perform the following steps: -In this section, you enable Azure AD single sign-on in the Azure portal. +1. **[Configure Azure AD SSO](#configure-azure-ad-sso)** - to enable your users to use this feature. + 1. **[Create an Azure AD test user](#create-an-azure-ad-test-user)** - to test Azure AD single sign-on with B.Simon. + 1. **[Assign the Azure AD test user](#assign-the-azure-ad-test-user)** - to enable B.Simon to use Azure AD single sign-on. +1. **[Configure Jobbadmin SSO](#configure-jobbadmin-sso)** - to configure the single sign-on settings on application side. + 1. **[Create Jobbadmin test user](#create-jobbadmin-test-user)** - to have a counterpart of B.Simon in Jobbadmin that is linked to the Azure AD representation of user. +1. **[Test SSO](#test-sso)** - to verify whether the configuration works. -To configure Azure AD single sign-on with Jobbadmin, perform the following steps: +## Configure Azure AD SSO -1. 
In the [Azure portal](https://portal.azure.com/), on the **Jobbadmin** application integration page, select **Single sign-on**. +Follow these steps to enable Azure AD SSO in the Azure portal. - ![Configure single sign-on link](common/select-sso.png) - -2. On the **Select a Single sign-on method** dialog, select **SAML/WS-Fed** mode to enable single sign-on. - - ![Single sign-on select mode](common/select-saml-option.png) - -3. On the **Set up Single Sign-On with SAML** page, click **Edit** icon to open **Basic SAML Configuration** dialog. - - ![Edit Basic SAML Configuration](common/edit-urls.png) +1. In the Azure portal, on the **Jobbadmin** application integration page, find the **Manage** section and select **single sign-on**. +2. On the **Select a single sign-on method** page, select **SAML**. +3. On the **Set up single sign-on with SAML** page, click the pencil icon for **Basic SAML Configuration** to edit the settings. + + ![Screenshot shows to edit Basic S A M L Configuration.](common/edit-urls.png "Basic Configuration") 4. On the **Basic SAML Configuration** section, perform the following steps: - ![Jobbadmin Domain and URLs single sign-on information](common/sp-identifier-reply.png) - - a. In the **Sign on URL** text box, type a URL using the following pattern: - `https://.jobbnorge.no/auth/saml2/login.ashx` - - b. In the **Identifier (Entity ID)** text box, type a URL using the following pattern: + a. In the **Identifier (Entity ID)** text box, type a URL using the following pattern: `https://.jobnorge.no` - c. In the **Reply URL** textbox, type a URL using the following pattern: `https://.jobbnorge.no/auth/saml2/login.ashx` + b. In the **Reply URL** textbox, type a URL using the following pattern: `https://.jobbnorge.no/auth/saml2/login.ashx` + + c. In the **Sign on URL** text box, type a URL using the following pattern: + `https://.jobbnorge.no/auth/saml2/login.ashx` > [!NOTE] - > These values are not real. Update these values with the actual Sign on URL, Identifier and Reply URL. Contact [Jobbadmin Client support team](https://www.jobbnorge.no/om-oss/kontakt-oss) to get these values. You can also refer to the patterns shown in the **Basic SAML Configuration** section in the Azure portal. + > These values are not real. Update these values with the actual Identifier, Reply URL and Sign on URL. Contact [Jobbadmin Client support team](https://www.jobbnorge.no/om-oss/kontakt-oss) to get these values. You can also refer to the patterns shown in the **Basic SAML Configuration** section in the Azure portal. 5. On the **Set up Single Sign-On with SAML** page, in the **SAML Signing Certificate** section, click **Download** to download the **Federation Metadata XML** from the given options as per your requirement and save it on your computer. - ![The Certificate download link](common/metadataxml.png) + ![Screenshot shows the Certificate download link.](common/metadataxml.png "Certificate") 6. On the **Set up Jobbadmin** section, copy the appropriate URL(s) as per your requirement. - ![Copy configuration URLs](common/copy-configuration-urls.png) - - a. Login URL - - b. Azure Ad Identifier - - c. Logout URL - -### Configure Jobbadmin Single Sign-On - -To configure single sign-on on **Jobbadmin** side, you need to send the downloaded **Federation Metadata XML** and appropriate copied URLs from Azure portal to [Jobbadmin support team](https://www.jobbnorge.no/om-oss/kontakt-oss). They set this setting to have the SAML SSO connection set properly on both sides. 
+ ![Screenshot shows to copy appropriate configuration U R L.](common/copy-configuration-urls.png "Configuration") ### Create an Azure AD test user -The objective of this section is to create a test user in the Azure portal called Britta Simon. - -1. In the Azure portal, in the left pane, select **Azure Active Directory**, select **Users**, and then select **All users**. - - ![The "Users and groups" and "All users" links](common/users.png) - -2. Select **New user** at the top of the screen. - - ![New user Button](common/new-user.png) - -3. In the User properties, perform the following steps. - - ![The User dialog box](common/user-properties.png) - - a. In the **Name** field enter **BrittaSimon**. - - b. In the **User name** field type **brittasimon\@yourcompanydomain.extension** - For example, BrittaSimon@contoso.com +In this section, you'll create a test user in the Azure portal called B.Simon. - c. Select **Show password** check box, and then write down the value that's displayed in the Password box. - - d. Click **Create**. +1. From the left pane in the Azure portal, select **Azure Active Directory**, select **Users**, and then select **All users**. +1. Select **New user** at the top of the screen. +1. In the **User** properties, follow these steps: + 1. In the **Name** field, enter `B.Simon`. + 1. In the **User name** field, enter the username@companydomain.extension. For example, `B.Simon@contoso.com`. + 1. Select the **Show password** check box, and then write down the value that's displayed in the **Password** box. + 1. Click **Create**. ### Assign the Azure AD test user -In this section, you enable Britta Simon to use Azure single sign-on by granting access to Jobbadmin. - -1. In the Azure portal, select **Enterprise Applications**, select **All applications**, then select **Jobbadmin**. - - ![Enterprise applications blade](common/enterprise-applications.png) - -2. In the applications list, select **Jobbadmin**. - - ![The Jobbadmin link in the Applications list](common/all-applications.png) +In this section, you'll enable B.Simon to use Azure single sign-on by granting access to Jobbadmin. -3. In the menu on the left, select **Users and groups**. +1. In the Azure portal, select **Enterprise Applications**, and then select **All applications**. +1. In the applications list, select **Jobbadmin**. +1. In the app's overview page, find the **Manage** section and select **Users and groups**. +1. Select **Add user**, then select **Users and groups** in the **Add Assignment** dialog. +1. In the **Users and groups** dialog, select **B.Simon** from the Users list, then click the **Select** button at the bottom of the screen. +1. If you are expecting a role to be assigned to the users, you can select it from the **Select a role** dropdown. If no role has been set up for this app, you see "Default Access" role selected. +1. In the **Add Assignment** dialog, click the **Assign** button. - ![The "Users and groups" link](common/users-groups-blade.png) +## Configure Jobbadmin SSO -4. Click the **Add user** button, then select **Users and groups** in the **Add Assignment** dialog. - - ![The Add Assignment pane](common/add-assign-user.png) - -5. In the **Users and groups** dialog select **Britta Simon** in the Users list, then click the **Select** button at the bottom of the screen. - -6. If you are expecting any role value in the SAML assertion then in the **Select Role** dialog select the appropriate role for the user from the list, then click the **Select** button at the bottom of the screen. 
- -7. In the **Add Assignment** dialog click the **Assign** button. +To configure single sign-on on **Jobbadmin** side, you need to send the downloaded **Federation Metadata XML** and appropriate copied URLs from Azure portal to [Jobbadmin support team](https://www.jobbnorge.no/om-oss/kontakt-oss). They set this setting to have the SAML SSO connection set properly on both sides. ### Create Jobbadmin test user In this section, you create a user called Britta Simon in Jobbadmin. Work with [Jobbadmin support team](https://www.jobbnorge.no/om-oss/kontakt-oss) to add the users in the Jobbadmin platform. Users must be created and activated before you use single sign-on. -### Test single sign-on +## Test SSO -In this section, you test your Azure AD single sign-on configuration using the Access Panel. +In this section, you test your Azure AD single sign-on configuration with following options. -When you click the Jobbadmin tile in the Access Panel, you should be automatically signed in to the Jobbadmin for which you set up SSO. For more information about the Access Panel, see [Introduction to the Access Panel](https://support.microsoft.com/account-billing/sign-in-and-start-apps-from-the-my-apps-portal-2f3b1bae-0e5a-4a86-a33e-876fbd2a4510). +* Click on **Test this application** in Azure portal. This will redirect to Jobbadmin Sign-on URL where you can initiate the login flow. -## Additional Resources +* Go to Jobbadmin Sign-on URL directly and initiate the login flow from there. -- [List of Tutorials on How to Integrate SaaS Apps with Azure Active Directory](./tutorial-list.md) +* You can use Microsoft My Apps. When you click the Jobbadmin tile in the My Apps, this will redirect to Jobbadmin Sign-on URL. For more information about the My Apps, see [Introduction to the My Apps](../user-help/my-apps-portal-end-user-access.md). -- [What is application access and single sign-on with Azure Active Directory?](../manage-apps/what-is-single-sign-on.md) +## Next steps -- [What is Conditional Access in Azure Active Directory?](../conditional-access/overview.md) \ No newline at end of file +Once you configure Jobbadmin you can enforce session control, which protects exfiltration and infiltration of your organization’s sensitive data in real time. Session control extends from Conditional Access. [Learn how to enforce session control with Microsoft Cloud App Security](/cloud-app-security/proxy-deployment-aad). \ No newline at end of file diff --git a/articles/active-directory/saas-apps/jobscore-tutorial.md b/articles/active-directory/saas-apps/jobscore-tutorial.md index 5c45535b7b24..590cce6ab93b 100644 --- a/articles/active-directory/saas-apps/jobscore-tutorial.md +++ b/articles/active-directory/saas-apps/jobscore-tutorial.md @@ -1,5 +1,5 @@ --- -title: 'Tutorial: Azure Active Directory integration with JobScore | Microsoft Docs' +title: 'Tutorial: Azure AD SSO integration with JobScore' description: Learn how to configure single sign-on between Azure Active Directory and JobScore. services: active-directory author: jeevansd @@ -9,91 +9,70 @@ ms.service: active-directory ms.subservice: saas-app-tutorial ms.workload: identity ms.topic: tutorial -ms.date: 02/25/2019 +ms.date: 05/25/2022 ms.author: jeedes --- -# Tutorial: Azure Active Directory integration with JobScore +# Tutorial: Azure AD SSO integration with JobScore -In this tutorial, you learn how to integrate JobScore with Azure Active Directory (Azure AD). 
-Integrating JobScore with Azure AD provides you with the following benefits: +In this tutorial, you'll learn how to integrate JobScore with Azure Active Directory (Azure AD). When you integrate JobScore with Azure AD, you can: -* You can control in Azure AD who has access to JobScore. -* You can enable your users to be automatically signed-in to JobScore (Single Sign-On) with their Azure AD accounts. -* You can manage your accounts in one central location - the Azure portal. - -If you want to know more details about SaaS app integration with Azure AD, see [What is application access and single sign-on with Azure Active Directory](../manage-apps/what-is-single-sign-on.md). -If you don't have an Azure subscription, [create a free account](https://azure.microsoft.com/free/) before you begin. +* Control in Azure AD who has access to JobScore. +* Enable your users to be automatically signed-in to JobScore with their Azure AD accounts. +* Manage your accounts in one central location - the Azure portal. ## Prerequisites -To configure Azure AD integration with JobScore, you need the following items: +To get started, you need the following items: -* An Azure AD subscription. If you don't have an Azure AD environment, you can get one-month trial [here](https://azure.microsoft.com/pricing/free-trial/) -* JobScore single sign-on enabled subscription +* An Azure AD subscription. If you don't have a subscription, you can get a [free account](https://azure.microsoft.com/free/). +* JobScore single sign-on (SSO) enabled subscription. +* Along with Cloud Application Administrator, Application Administrator can also add or manage applications in Azure AD. +For more information, see [Azure built-in roles](../roles/permissions-reference.md). ## Scenario description In this tutorial, you configure and test Azure AD single sign-on in a test environment. -* JobScore supports **SP** initiated SSO - -## Adding JobScore from the gallery - -To configure the integration of JobScore into Azure AD, you need to add JobScore from the gallery to your list of managed SaaS apps. - -**To add JobScore from the gallery, perform the following steps:** - -1. In the **[Azure portal](https://portal.azure.com)**, on the left navigation panel, click **Azure Active Directory** icon. - - ![The Azure Active Directory button](common/select-azuread.png) - -2. Navigate to **Enterprise Applications** and then select the **All Applications** option. - - ![The Enterprise applications blade](common/enterprise-applications.png) - -3. To add new application, click **New application** button on the top of dialog. +* JobScore supports **SP** initiated SSO. - ![The New application button](common/add-new-app.png) +> [!NOTE] +> Identifier of this application is a fixed string value so only one instance can be configured in one tenant. -4. In the search box, type **JobScore**, select **JobScore** from result panel then click **Add** button to add the application. +## Add JobScore from the gallery - ![JobScore in the results list](common/search-new-app.png) - -## Configure and test Azure AD single sign-on - -In this section, you configure and test Azure AD single sign-on with JobScore based on a test user called **Britta Simon**. -For single sign-on to work, a link relationship between an Azure AD user and the related user in JobScore needs to be established. - -To configure and test Azure AD single sign-on with JobScore, you need to complete the following building blocks: - -1. 
**[Configure Azure AD Single Sign-On](#configure-azure-ad-single-sign-on)** - to enable your users to use this feature. -2. **[Configure JobScore Single Sign-On](#configure-jobscore-single-sign-on)** - to configure the Single Sign-On settings on application side. -3. **[Create an Azure AD test user](#create-an-azure-ad-test-user)** - to test Azure AD single sign-on with Britta Simon. -4. **[Assign the Azure AD test user](#assign-the-azure-ad-test-user)** - to enable Britta Simon to use Azure AD single sign-on. -5. **[Create JobScore test user](#create-jobscore-test-user)** - to have a counterpart of Britta Simon in JobScore that is linked to the Azure AD representation of user. -6. **[Test single sign-on](#test-single-sign-on)** - to verify whether the configuration works. - -### Configure Azure AD single sign-on - -In this section, you enable Azure AD single sign-on in the Azure portal. +To configure the integration of JobScore into Azure AD, you need to add JobScore from the gallery to your list of managed SaaS apps. -To configure Azure AD single sign-on with JobScore, perform the following steps: +1. Sign in to the Azure portal using either a work or school account, or a personal Microsoft account. +1. On the left navigation pane, select the **Azure Active Directory** service. +1. Navigate to **Enterprise Applications** and then select **All Applications**. +1. To add new application, select **New application**. +1. In the **Add from the gallery** section, type **JobScore** in the search box. +1. Select **JobScore** from results panel and then add the app. Wait a few seconds while the app is added to your tenant. -1. In the [Azure portal](https://portal.azure.com/), on the **JobScore** application integration page, select **Single sign-on**. +## Configure and test Azure AD SSO for JobScore - ![Configure single sign-on link](common/select-sso.png) +Configure and test Azure AD SSO with JobScore using a test user called **B.Simon**. For SSO to work, you need to establish a link relationship between an Azure AD user and the related user in JobScore. -2. On the **Select a Single sign-on method** dialog, select **SAML/WS-Fed** mode to enable single sign-on. +To configure and test Azure AD SSO with JobScore, perform the following steps: - ![Single sign-on select mode](common/select-saml-option.png) +1. **[Configure Azure AD SSO](#configure-azure-ad-sso)** - to enable your users to use this feature. + 1. **[Create an Azure AD test user](#create-an-azure-ad-test-user)** - to test Azure AD single sign-on with B.Simon. + 1. **[Assign the Azure AD test user](#assign-the-azure-ad-test-user)** - to enable B.Simon to use Azure AD single sign-on. +1. **[Configure JobScore SSO](#configure-jobscore-sso)** - to configure the single sign-on settings on application side. + 1. **[Create JobScore test user](#create-jobscore-test-user)** - to have a counterpart of B.Simon in JobScore that is linked to the Azure AD representation of user. +1. **[Test SSO](#test-sso)** - to verify whether the configuration works. -3. On the **Set up Single Sign-On with SAML** page, click **Edit** icon to open **Basic SAML Configuration** dialog. +## Configure Azure AD SSO - ![Edit Basic SAML Configuration](common/edit-urls.png) +Follow these steps to enable Azure AD SSO in the Azure portal. -4. On the **Basic SAML Configuration** section, perform the following steps: +1. In the Azure portal, on the **JobScore** application integration page, find the **Manage** section and select **single sign-on**. +2. 
On the **Select a single sign-on method** page, select **SAML**. +3. On the **Set up single sign-on with SAML** page, click the pencil icon for **Basic SAML Configuration** to edit the settings. + + ![Screenshot shows to edit Basic S A M L Configuration.](common/edit-urls.png "Basic Configuration") - ![JobScore Domain and URLs single sign-on information](common/sp-signonurl.png) +4. On the **Basic SAML Configuration** section, perform the following step: In the **Sign-on URL** text box, type a URL using the following pattern: `https://hire.jobscore.com/auth/adfs/` @@ -103,87 +82,54 @@ To configure Azure AD single sign-on with JobScore, perform the following steps: 5. On the **Set up Single Sign-On with SAML** page, in the **SAML Signing Certificate** section, click **Download** to download the **Federation Metadata XML** from the given options as per your requirement and save it on your computer. - ![The Certificate download link](common/metadataxml.png) + ![Screenshot shows the Certificate download link.](common/metadataxml.png "Certificate") 6. On the **Set up JobScore** section, copy the appropriate URL(s) as per your requirement. - ![Copy configuration URLs](common/copy-configuration-urls.png) - - a. Login URL - - b. Azure Ad Identifier - - c. Logout URL - -### Configure JobScore Single Sign-On - -To configure single sign-on on **JobScore** side, you need to send the downloaded **Federation Metadata XML** and appropriate copied URLs from Azure portal to [JobScore support team](mailto:support@jobscore.com). They set this setting to have the SAML SSO connection set properly on both sides. + ![Screenshot shows to copy appropriate configuration U R L.](common/copy-configuration-urls.png "Configuration") ### Create an Azure AD test user -The objective of this section is to create a test user in the Azure portal called Britta Simon. - -1. In the Azure portal, in the left pane, select **Azure Active Directory**, select **Users**, and then select **All users**. - - ![The "Users and groups" and "All users" links](common/users.png) - -2. Select **New user** at the top of the screen. - - ![New user Button](common/new-user.png) - -3. In the User properties, perform the following steps. - - ![The User dialog box](common/user-properties.png) - - a. In the **Name** field enter **BrittaSimon**. - - b. In the **User name** field type **brittasimon\@yourcompanydomain.extension** - For example, BrittaSimon@contoso.com +In this section, you'll create a test user in the Azure portal called B.Simon. - c. Select **Show password** check box, and then write down the value that's displayed in the Password box. - - d. Click **Create**. +1. From the left pane in the Azure portal, select **Azure Active Directory**, select **Users**, and then select **All users**. +1. Select **New user** at the top of the screen. +1. In the **User** properties, follow these steps: + 1. In the **Name** field, enter `B.Simon`. + 1. In the **User name** field, enter the username@companydomain.extension. For example, `B.Simon@contoso.com`. + 1. Select the **Show password** check box, and then write down the value that's displayed in the **Password** box. + 1. Click **Create**. ### Assign the Azure AD test user -In this section, you enable Britta Simon to use Azure single sign-on by granting access to JobScore. - -1. In the Azure portal, select **Enterprise Applications**, select **All applications**, then select **JobScore**. - - ![Enterprise applications blade](common/enterprise-applications.png) - -2. 
In the applications list, select **JobScore**. - - ![The JobScore link in the Applications list](common/all-applications.png) +In this section, you'll enable B.Simon to use Azure single sign-on by granting access to JobScore. -3. In the menu on the left, select **Users and groups**. +1. In the Azure portal, select **Enterprise Applications**, and then select **All applications**. +1. In the applications list, select **JobScore**. +1. In the app's overview page, find the **Manage** section and select **Users and groups**. +1. Select **Add user**, then select **Users and groups** in the **Add Assignment** dialog. +1. In the **Users and groups** dialog, select **B.Simon** from the Users list, then click the **Select** button at the bottom of the screen. +1. If you are expecting a role to be assigned to the users, you can select it from the **Select a role** dropdown. If no role has been set up for this app, you see "Default Access" role selected. +1. In the **Add Assignment** dialog, click the **Assign** button. - ![The "Users and groups" link](common/users-groups-blade.png) +## Configure JobScore SSO -4. Click the **Add user** button, then select **Users and groups** in the **Add Assignment** dialog. - - ![The Add Assignment pane](common/add-assign-user.png) - -5. In the **Users and groups** dialog select **Britta Simon** in the Users list, then click the **Select** button at the bottom of the screen. - -6. If you are expecting any role value in the SAML assertion then in the **Select Role** dialog select the appropriate role for the user from the list, then click the **Select** button at the bottom of the screen. - -7. In the **Add Assignment** dialog click the **Assign** button. +To configure single sign-on on **JobScore** side, you need to send the downloaded **Federation Metadata XML** and appropriate copied URLs from Azure portal to [JobScore support team](mailto:support@jobscore.com). They set this setting to have the SAML SSO connection set properly on both sides. ### Create JobScore test user In this section, you create a user called Britta Simon in JobScore. Work with [JobScore support team](mailto:support@jobscore.com) to add the users in the JobScore platform. Users must be created and activated before you use single sign-on. -### Test single sign-on +## Test SSO -In this section, you test your Azure AD single sign-on configuration using the Access Panel. +In this section, you test your Azure AD single sign-on configuration with following options. -When you click the JobScore tile in the Access Panel, you should be automatically signed in to the JobScore for which you set up SSO. For more information about the Access Panel, see [Introduction to the Access Panel](https://support.microsoft.com/account-billing/sign-in-and-start-apps-from-the-my-apps-portal-2f3b1bae-0e5a-4a86-a33e-876fbd2a4510). +* Click on **Test this application** in Azure portal. This will redirect to JobScore Sign-on URL where you can initiate the login flow. -## Additional Resources +* Go to JobScore Sign-on URL directly and initiate the login flow from there. -- [List of Tutorials on How to Integrate SaaS Apps with Azure Active Directory](./tutorial-list.md) +* You can use Microsoft My Apps. When you click the JobScore tile in the My Apps, this will redirect to JobScore Sign-on URL. For more information about the My Apps, see [Introduction to the My Apps](../user-help/my-apps-portal-end-user-access.md). 
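
In addition to the options above, you can run a quick, non-interactive check that the SP-initiated flow hands off to Azure AD. The sketch below is illustrative only: `<companyid>` is a placeholder for the value in your JobScore Sign-on URL, and if JobScore uses an HTTP-POST binding rather than a redirect, the SAML request appears in the response body instead of the `Location` header.

```python
import requests

# Placeholder: substitute the company id used in your JobScore Sign-on URL.
SIGN_ON_URL = "https://hire.jobscore.com/auth/adfs/<companyid>"

resp = requests.get(SIGN_ON_URL, allow_redirects=False, timeout=30)

location = resp.headers.get("Location", "")
if resp.is_redirect and "login.microsoftonline.com" in location:
    print("SP-initiated flow hands off to Azure AD:", location.split("?")[0])
elif "SAMLRequest" in resp.text:
    print("SP returned an auto-submitting form carrying a SAMLRequest (HTTP-POST binding).")
else:
    print("No SAML handoff detected; check the Basic SAML Configuration values.", resp.status_code)
```

A full end-to-end sign-in still requires the browser-based tests described above, because the SAML response must be posted back by an authenticated user.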
-- [What is application access and single sign-on with Azure Active Directory?](../manage-apps/what-is-single-sign-on.md) +## Next steps -- [What is Conditional Access in Azure Active Directory?](../conditional-access/overview.md) \ No newline at end of file +Once you configure JobScore you can enforce session control, which protects exfiltration and infiltration of your organization’s sensitive data in real time. Session control extends from Conditional Access. [Learn how to enforce session control with Microsoft Cloud App Security](/cloud-app-security/proxy-deployment-aad). \ No newline at end of file diff --git a/articles/active-directory/saas-apps/nodetrax-project-tutorial.md b/articles/active-directory/saas-apps/nodetrax-project-tutorial.md index 3bb34c43a151..0f44c85d2e0c 100644 --- a/articles/active-directory/saas-apps/nodetrax-project-tutorial.md +++ b/articles/active-directory/saas-apps/nodetrax-project-tutorial.md @@ -9,7 +9,7 @@ ms.service: active-directory ms.subservice: saas-app-tutorial ms.workload: identity ms.topic: tutorial -ms.date: 10/06/2021 +ms.date: 05/24/2022 ms.author: jeedes --- @@ -29,6 +29,9 @@ To get started, you need the following items: * An Azure AD subscription. If you don't have a subscription, you can get a [free account](https://azure.microsoft.com/free/). * Nodetrax Project single sign-on (SSO) enabled subscription. +> [!NOTE] +> This integration is also available to use from Azure AD US Government Cloud environment. You can find this application in the Azure AD US Government Cloud Application Gallery and configure it in the same way as you do from public cloud. + ## Scenario description In this tutorial, you configure and test Azure AD SSO in a test environment. @@ -67,7 +70,7 @@ Follow these steps to enable Azure AD SSO in the Azure portal. 1. On the **Select a single sign-on method** page, select **SAML**. 1. On the **Set up single sign-on with SAML** page, click the pencil icon for **Basic SAML Configuration** to edit the settings. - ![Edit Basic SAML Configuration](common/edit-urls.png) + ![Screenshot shows to edit Basic S A M L Configuration.](common/edit-urls.png "Basic Configuration") 1. On the **Basic SAML Configuration** section, the user does not have to perform any step as the app is already pre-integrated with Azure. @@ -78,7 +81,7 @@ Follow these steps to enable Azure AD SSO in the Azure portal. 1. Nodetrax Project application expects the SAML assertions in a specific format, which requires you to add custom attribute mappings to your SAML token attributes configuration. The following screenshot shows the list of default attributes. - ![image](common/default-attributes.png) + ![Screenshot shows the image of Nodetrax Project application.](common/default-attributes.png "Attributes") 1. In addition to above, Nodetrax Project application expects few more attributes to be passed back in SAML response which are shown below. These attributes are also pre populated but you can review them as per your requirements. @@ -88,11 +91,11 @@ Follow these steps to enable Azure AD SSO in the Azure portal. 1. On the **Set up single sign-on with SAML** page, in the **SAML Signing Certificate** section, find **Certificate (Base64)** and select **Download** to download the certificate and save it on your computer. - ![The Certificate download link](common/certificatebase64.png) + ![Screenshot shows the Certificate download link.](common/certificatebase64.png "Certificate") 1. 
On the **Set up Nodetrax Project** section, copy the appropriate URL(s) based on your requirement. - ![Copy configuration URLs](common/copy-configuration-urls.png) + ![Screenshot shows to copy Configuration appropriate U R L.](common/copy-configuration-urls.png "Configuration") ### Create an Azure AD test user diff --git a/articles/active-directory/saas-apps/openlearning-tutorial.md b/articles/active-directory/saas-apps/openlearning-tutorial.md index 0f7c546a6187..fc6109a22c91 100644 --- a/articles/active-directory/saas-apps/openlearning-tutorial.md +++ b/articles/active-directory/saas-apps/openlearning-tutorial.md @@ -9,7 +9,7 @@ ms.service: active-directory ms.subservice: saas-app-tutorial ms.workload: identity ms.topic: tutorial -ms.date: 02/17/2022 +ms.date: 05/24/2022 ms.author: jeedes --- @@ -29,6 +29,9 @@ To get started, you need the following items: * An Azure AD subscription. If you don't have a subscription, you can get a [free account](https://azure.microsoft.com/free/). * OpenLearning single sign-on (SSO) enabled subscription. +> [!NOTE] +> This integration is also available to use from Azure AD US Government Cloud environment. You can find this application in the Azure AD US Government Cloud Application Gallery and configure it in the same way as you do from public cloud. + ## Scenario description In this tutorial, you configure and test Azure AD SSO in a test environment. @@ -67,17 +70,17 @@ Follow these steps to enable Azure AD SSO in the Azure portal. 1. On the **Select a single sign-on method** page, select **SAML**. 1. On the **Set up single sign-on with SAML** page, click the pencil icon for **Basic SAML Configuration** to edit the settings. - ![Edit Basic SAML Configuration](common/edit-urls.png) + ![Screenshot shows to edit Basic S A M L Configuration.](common/edit-urls.png "Basic Configuration") 1. On the **Basic SAML Configuration** section, if you have **Service Provider metadata file**, perform the following steps: a. Click **Upload metadata file**. - ![Upload metadata file](common/upload-metadata.png) + ![Screenshot shows to upload metadata file.](common/upload-metadata.png "Metadata") b. Click on **folder logo** to select the metadata file and click **Upload**. - ![choose metadata file](common/browse-upload-metadata.png) + ![Screenshot shows to choose metadata file.](common/browse-upload-metadata.png "Folder") c. After the metadata file is successfully uploaded, the **Identifier** value gets auto populated in Basic SAML Configuration section. @@ -89,11 +92,11 @@ Follow these steps to enable Azure AD SSO in the Azure portal. 1. On the **Set up single sign-on with SAML** page, in the **SAML Signing Certificate** section, find **Certificate (Base64)** and select **Download** to download the certificate and save it on your computer. - ![The Certificate download link](common/certificatebase64.png) + ![Screenshot shows the Certificate download link.](common/certificatebase64.png "Certificate") 1. On the **Set up OpenLearning** section, copy the appropriate URL(s) based on your requirement. - ![Copy configuration URLs](common/copy-configuration-urls.png) + ![Screenshot shows to copy Configuration appropriate U R L.](common/copy-configuration-urls.png "Configuration") 1. OpenLearning application expects to enable token encryption in order to make SSO work. To activate token encryption, go to the **Azure Active Directory** > **Enterprise applications** and select **Token encryption**. 
For more information, please refer this [link](../manage-apps/howto-saml-token-encryption.md). diff --git a/articles/active-directory/saas-apps/toc.yml b/articles/active-directory/saas-apps/toc.yml index f025d461b379..befe71ad936c 100644 --- a/articles/active-directory/saas-apps/toc.yml +++ b/articles/active-directory/saas-apps/toc.yml @@ -2599,6 +2599,8 @@ href: bldng-app-provisioning-tutorial.md - name: Blink href: blink-provisioning-tutorial.md + - name: Blinq + href: blinq-provisioning-tutorial.md - name: BlogIn href: blogin-provisioning-tutorial.md - name: BlueJeans diff --git a/articles/active-directory/standards/memo-22-09-multi-factor-authentication.md b/articles/active-directory/standards/memo-22-09-multi-factor-authentication.md index b52ee62e51a0..d333e23677a1 100644 --- a/articles/active-directory/standards/memo-22-09-multi-factor-authentication.md +++ b/articles/active-directory/standards/memo-22-09-multi-factor-authentication.md @@ -35,7 +35,7 @@ U.S. Federal agencies will be approaching this guidance from different starting - **[Azure AD certificate-based authentication](../authentication/concept-certificate-based-authentication.md)** offers cloud native certificate based authentication (without dependency on a federated identity provider). This includes smart card implementations such as Common Access Card (CAC) & Personal Identity Verification (PIV) as well as derived PIV credentials deployed to mobile devices or security keys -- **[Windows Hello for Business](/windows/security/identity-protection/hello-for-business/hello-overview)** offers passwordless multifactor authentication that is phishing-resistant. For more information, see the [Windows Hello for Business Deployment Overview](https://docs.microsoft.com/windows/security/identity-protection/hello-for-business/hello-deployment-guide) +- **[Windows Hello for Business](/windows/security/identity-protection/hello-for-business/hello-overview)** offers passwordless multifactor authentication that is phishing-resistant. 
For more information, see the [Windows Hello for Business Deployment Overview](/windows/security/identity-protection/hello-for-business/hello-deployment-guide) ### Protection from external phishing @@ -75,8 +75,8 @@ For more information on deploying this method, see the following resources: For more information on deploying this method, see the following resources: -- [Deploying Active Directory Federation Services in Azure](https://docs.microsoft.com/windows-server/identity/ad-fs/deployment/how-to-connect-fed-azure-adfs) -- [Configuring AD FS for user certificate authentication](https://docs.microsoft.com/windows-server/identity/ad-fs/operations/configure-user-certificate-authentication) +- [Deploying Active Directory Federation Services in Azure](/windows-server/identity/ad-fs/deployment/how-to-connect-fed-azure-adfs) +- [Configuring AD FS for user certificate authentication](/windows-server/identity/ad-fs/operations/configure-user-certificate-authentication) ### Additional phishing-resistant method considerations @@ -164,4 +164,4 @@ The following articles are part of this documentation set: For more information about Zero Trust, see: -[Securing identity with Zero Trust](/security/zero-trust/deploy/identity) +[Securing identity with Zero Trust](/security/zero-trust/deploy/identity) \ No newline at end of file diff --git a/articles/active-directory/verifiable-credentials/presentation-request-api.md b/articles/active-directory/verifiable-credentials/presentation-request-api.md index 51f43fb45ef3..78a21415cc5d 100644 --- a/articles/active-directory/verifiable-credentials/presentation-request-api.md +++ b/articles/active-directory/verifiable-credentials/presentation-request-api.md @@ -8,7 +8,7 @@ manager: rkarlin ms.service: decentralized-identity ms.topic: reference ms.subservice: verifiable-credentials -ms.date: 10/08/2021 +ms.date: 05/26/2022 ms.author: barclayn #Customer intent: As an administrator, I am trying to learn the process of revoking verifiable credentials that I have issued. @@ -208,7 +208,7 @@ The callback endpoint is called when a user scans the QR code, uses the deep lin | `code` |string |The code returned when the request was retrieved by the authenticator app. Possible values:
  • `request_retrieved`: The user scanned the QR code or selected the link that starts the presentation flow.
  • `presentation_verified`: The verifiable credential validation completed successfully.
| | `state` |string| Returns the state value that you passed in the original payload. | | `subject`|string | The verifiable credential user DID.| -| `issuers`| array |Returns an array of verifiable credentials requested. For each verifiable credential, it provides:
  • The verifiable credential type.
  • The claims retrieved.
  • The verifiable credential issuer’s domain.
  • The verifiable credential issuer’s domain validation status.
  • | +| `issuers`| array |Returns an array of verifiable credentials requested. For each verifiable credential, it provides:
  • The verifiable credential type(s).
  • The issuer's DID.<br> 
  • The claims retrieved.
  • The verifiable credential issuer’s domain.
  • The verifiable credential issuer’s domain validation status.
  • | | `receipt`| string | Optional. The receipt contains the original payload sent from the wallet to the Verifiable Credentials service. The receipt should be used for troubleshooting/debugging only. The format in the receipt is not fix and can change based on the wallet and version used.| The following example demonstrates a callback payload when the authenticator app starts the presentation request: diff --git a/articles/aks/TOC.yml b/articles/aks/TOC.yml index b34bb64b90be..8eb6e1ea44f8 100644 --- a/articles/aks/TOC.yml +++ b/articles/aks/TOC.yml @@ -337,7 +337,7 @@ href: load-balancer-standard.md - name: Use a static IP address and DNS label href: static-ip.md - - name: Use an HTTP proxy (preview) + - name: Use an HTTP proxy href: http-proxy.md - name: Ingress items: @@ -364,7 +364,7 @@ href: limit-egress-traffic.md - name: Use a user defined route for egress href: egress-outboundtype.md - - name: Managed NAT Gateway (preview) + - name: Managed NAT Gateway href: nat-gateway.md - name: Customize CoreDNS href: coredns-custom.md @@ -386,11 +386,11 @@ href: azure-netapp-files.md - name: Use Azure Ultra Disks href: use-ultra-disks.md - - name: CSI Storage Drivers + - name: CSI storage drivers items: - - name: Enable CSI Storage Drivers + - name: CSI storage driver overview href: csi-storage-drivers.md - - name: Azure Disk CSI drivers + - name: Azure disk CSI drivers href: azure-disk-csi.md - name: Azure Files CSI drivers href: azure-files-csi.md @@ -467,6 +467,8 @@ href: open-service-mesh-uninstall-add-on.md - name: Track releases and region availability href: release-tracker.md + - name: Use Web Application Routing (preview) + href: web-app-routing.md - name: Kubernetes Event-driven Autoscaler add-on (preview) href: keda.md - name: Use cluster extensions diff --git a/articles/aks/aks-migration.md b/articles/aks/aks-migration.md index 3cdda34208a5..3238af52d8b3 100644 --- a/articles/aks/aks-migration.md +++ b/articles/aks/aks-migration.md @@ -132,7 +132,7 @@ Stateless application migration is the most straightforward case: Carefully plan your migration of stateful applications to avoid data loss or unexpected downtime. * If you use Azure Files, you can mount the file share as a volume into the new cluster. See [Mount Static Azure Files as a Volume](./azure-files-volume.md#mount-file-share-as-a-persistent-volume). -* If you use Azure Managed Disks, you can only mount the disk if unattached to any VM. See [Mount Static Azure Disk as a Volume](./azure-disk-volume.md#mount-disk-as-volume). +* If you use Azure Managed Disks, you can only mount the disk if unattached to any VM. See [Mount Static Azure Disk as a Volume](./azure-disk-volume.md#mount-disk-as-a-volume). * If neither of those approaches work, you can use a backup and restore options. See [Velero on Azure](https://github.com/vmware-tanzu/velero-plugin-for-microsoft-azure/blob/master/README.md). #### Azure Files diff --git a/articles/aks/azure-disk-csi.md b/articles/aks/azure-disk-csi.md index edd748720e76..a36a7a7e536a 100644 --- a/articles/aks/azure-disk-csi.md +++ b/articles/aks/azure-disk-csi.md @@ -1,43 +1,74 @@ --- -title: Use Container Storage Interface (CSI) drivers for Azure Disks on Azure Kubernetes Service (AKS) +title: Use Container Storage Interface (CSI) driver for Azure Disk in Azure Kubernetes Service (AKS) description: Learn how to use the Container Storage Interface (CSI) drivers for Azure disks in an Azure Kubernetes Service (AKS) cluster. 
services: container-service ms.topic: article -ms.date: 04/06/2022 +ms.date: 05/23/2022 author: palma21 --- -# Use the Azure disk Container Storage Interface (CSI) drivers in Azure Kubernetes Service (AKS) +# Use the Azure disk Container Storage Interface (CSI) driver in Azure Kubernetes Service (AKS) + The Azure disk Container Storage Interface (CSI) driver is a [CSI specification](https://github.com/container-storage-interface/spec/blob/master/spec.md)-compliant driver used by Azure Kubernetes Service (AKS) to manage the lifecycle of Azure disks. The CSI is a standard for exposing arbitrary block and file storage systems to containerized workloads on Kubernetes. By adopting and using CSI, AKS can write, deploy, and iterate plug-ins to expose new or improve existing storage systems in Kubernetes without having to touch the core Kubernetes code and wait for its release cycles. -To create an AKS cluster with CSI driver support, see [Enable CSI drivers for Azure disks and Azure Files on AKS](csi-storage-drivers.md). +To create an AKS cluster with CSI driver support, see [Enable CSI driver on AKS](csi-storage-drivers.md). This article describes how to use the Azure disk CSI driver version 1. + +> [!NOTE] +> Azure disk CSI driver v2 (preview) improves scalability and reduces pod failover latency. It uses shared disks to provision attachment replicas on multiple cluster nodes and integrates with the pod scheduler to ensure a node with an attachment replica is chosen on pod failover. Azure disk CSI driver v2 (preview) also provides the ability to fine tune performance. If you're interested in participating in the preview, submit a request: [https://aka.ms/DiskCSIv2Preview](https://aka.ms/DiskCSIv2Preview). This preview version is provided without a service level agreement, and you can occasionally expect breaking changes while in preview. The preview version isn't recommended for production workloads. For more information, see [Supplemental Terms of Use for Microsoft Azure Previews](https://azure.microsoft.com/support/legal/preview-supplemental-terms/). > [!NOTE] > *In-tree drivers* refers to the current storage drivers that are part of the core Kubernetes code versus the new CSI drivers, which are plug-ins. -## Azure Disk CSI driver new features -Besides original in-tree driver features, Azure Disk CSI driver already provides following new features: -- performance improvement when attach or detach disks in parallel - - in-tree driver attaches or detaches disks in serial while CSI driver would attach or detach disks in batch, there would be significant improvement when there are multiple disks attaching to one node. -- ZRS disk support +## Azure disk CSI driver features + +In addition to in-tree driver features, Azure disk CSI driver supports the following features: + +- Performance improvements during concurrent disk attach and detach + - In-tree drivers attach or detach disks in serial, while CSI drivers attach or detach disks in batch. There is significant improvement when there are multiple disks attaching to one node. 
+- Zone-redundant storage (ZRS) disk support - `Premium_ZRS`, `StandardSSD_ZRS` disk types are supported, check more details about [Zone-redundant storage for managed disks](../virtual-machines/disks-redundancy.md) - [Snapshot](#volume-snapshots) - [Volume clone](#clone-volumes) - [Resize disk PV without downtime](#resize-a-persistent-volume-without-downtime) +## Storage class driver dynamic disk parameters + +|Name | Meaning | Available Value | Mandatory | Default value +|--- | --- | --- | --- | --- +|skuName | Azure disk storage account type (alias: `storageAccountType`)| `Standard_LRS`, `Premium_LRS`, `StandardSSD_LRS`, `UltraSSD_LRS`, `Premium_ZRS`, `StandardSSD_ZRS` | No | `StandardSSD_LRS`| +|kind | Managed or unmanaged (blob based) disk | `managed` (`dedicated` and `shared` are deprecated) | No | `managed`| +|fsType | File System Type | `ext4`, `ext3`, `ext2`, `xfs`, `btrfs` for Linux, `ntfs` for Windows | No | `ext4` for Linux, `ntfs` for Windows| +|cachingMode | [Azure Data Disk Host Cache Setting](../virtual-machines/windows/premium-storage-performance.md#disk-caching) | `None`, `ReadOnly`, `ReadWrite` | No | `ReadOnly`| +|location | Specify Azure region where Azure disks will be created | `eastus`, `westus`, etc. | No | If empty, driver will use the same location name as current AKS cluster| +|resourceGroup | Specify the resource group where the Azure disk will be created | Existing resource group name | No | If empty, driver will use the same resource group name as current AKS cluster| +|DiskIOPSReadWrite | [UltraSSD disk](../virtual-machines/linux/disks-ultra-ssd.md) IOPS Capability (minimum: 2 IOPS/GiB ) | 100~160000 | No | `500`| +|DiskMBpsReadWrite | [UltraSSD disk](../virtual-machines/linux/disks-ultra-ssd.md) Throughput Capability(minimum: 0.032/GiB) | 1~2000 | No | `100`| +|LogicalSectorSize | Logical sector size in bytes for Ultra disk. Supported values are 512 ad 4096. 4096 is the default. | `512`, `4096` | No | `4096`| +|tags | Azure disk [tags](../azure-resource-manager/management/tag-resources.md) | Tag format: `key1=val1,key2=val2` | No | ""| +|diskEncryptionSetID | ResourceId of the disk encryption set to use for [enabling encryption at rest](../virtual-machines/windows/disk-encryption.md) | format: `/subscriptions/{subs-id}/resourceGroups/{rg-name}/providers/Microsoft.Compute/diskEncryptionSets/{diskEncryptionSet-name}` | No | ""| +|diskEncryptionType | Encryption type of the disk encryption set | `EncryptionAtRestWithCustomerKey`(by default), `EncryptionAtRestWithPlatformAndCustomerKeys` | No | ""| +|writeAcceleratorEnabled | [Write Accelerator on Azure Disks](../virtual-machines/windows/how-to-enable-write-accelerator.md) | `true`, `false` | No | ""| +|networkAccessPolicy | NetworkAccessPolicy property to prevent generation of the SAS URI for a disk or a snapshot | `AllowAll`, `DenyAll`, `AllowPrivate` | No | `AllowAll`| +|diskAccessID | ARM ID of the DiskAccess resource to use private endpoints on disks | | No | ``| +|enableBursting | [Enable on-demand bursting](../virtual-machines/disk-bursting.md) beyond the provisioned performance target of the disk. On-demand bursting should only be applied to Premium disk and when the disk size > 512GB. Ultra and shared disk is not supported. Bursting is disabled by default. 
| `true`, `false` | No | `false`| +|useragent | User agent used for [customer usage attribution](../marketplace/azure-partner-customer-usage-attribution.md)| | No | Generated Useragent formatted `driverName/driverVersion compiler/version (OS-ARCH)`| +|enableAsyncAttach | Allow multiple disk attach operations (in batch) on one node in parallel.
    While this can speed up disk attachment, you may encounter Azure API throttling limit when there are large number of volume attachments. | `true`, `false` | No | `false`| +|subscriptionID | Specify Azure subscription ID where the Azure disk will be created | Azure subscription ID | No | If not empty, `resourceGroup` must be provided.| + ## Use CSI persistent volumes with Azure disks -A [persistent volume](concepts-storage.md#persistent-volumes) (PV) represents a piece of storage that's provisioned for use with Kubernetes pods. A PV can be used by one or many pods and can be dynamically or statically provisioned. This article shows you how to dynamically create PVs with Azure disks for use by a single pod in an AKS cluster. For static provisioning, see [Manually create and use a volume with Azure disks](azure-disk-volume.md). +A [persistent volume](concepts-storage.md#persistent-volumes) (PV) represents a piece of storage that's provisioned for use with Kubernetes pods. A PV can be used by one or many pods and can be dynamically or statically provisioned. This article shows you how to dynamically create PVs with Azure disks for use by a single pod in an AKS cluster. For static provisioning, see [Create a static volume with Azure disks](azure-disk-volume.md). For more information on Kubernetes volumes, see [Storage options for applications in AKS][concepts-storage]. ## Dynamically create Azure disk PVs by using the built-in storage classes -A storage class is used to define how a unit of storage is dynamically created with a persistent volume. For more information on Kubernetes storage classes, see [Kubernetes storage classes][kubernetes-storage-classes]. -When you use storage CSI drivers on AKS, there are two additional built-in `StorageClasses` that use the Azure disk CSI storage drivers. The additional CSI storage classes are created with the cluster alongside the in-tree default storage classes. +A storage class is used to define how a unit of storage is dynamically created with a persistent volume. For more information on Kubernetes storage classes, see [Kubernetes storage classes][kubernetes-storage-classes]. + +When you use the Azure disk storage CSI driver on AKS, there are two additional built-in `StorageClasses` that use the Azure disk CSI storage driver. The additional CSI storage classes are created with the cluster alongside the in-tree default storage classes. - `managed-csi`: Uses Azure Standard SSD locally redundant storage (LRS) to create a managed disk. - `managed-csi-premium`: Uses Azure Premium LRS to create a managed disk. @@ -46,7 +77,7 @@ The reclaim policy in both storage classes ensures that the underlying Azure dis To leverage these storage classes, create a [PVC](concepts-storage.md#persistent-volume-claims) and respective pod that references and uses them. A PVC is used to automatically provision storage based on a storage class. A PVC can use one of the pre-created storage classes or a user-defined storage class to create an Azure-managed disk for the desired SKU and size. When you create a pod definition, the PVC is specified to request the desired storage. 
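For reference, a claim that uses one of these built-in storage classes can be as small as the following sketch; the claim name and requested size here are illustrative and are not taken from the example manifest applied in the next command.

```yaml
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: pvc-azuredisk-example    # illustrative name
spec:
  accessModes:
    - ReadWriteOnce              # an Azure disk attaches to a single node at a time
  resources:
    requests:
      storage: 10Gi              # size of the managed disk to provision
  storageClassName: managed-csi  # built-in class backed by Standard SSD LRS
```

The manifest referenced in the next command creates a similar claim together with a pod that mounts it.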
-Create an example pod and respective PVC with the [kubectl apply][kubectl-apply] command: +Create an example pod and respective PVC by running the [kubectl apply][kubectl-apply] command: ```console $ kubectl apply -f https://raw.githubusercontent.com/kubernetes-sigs/azuredisk-csi-driver/master/deploy/example/pvc-azuredisk-csi.yaml @@ -56,13 +87,13 @@ persistentvolumeclaim/pvc-azuredisk created pod/nginx-azuredisk created ``` -After the pod is in the running state, create a new file called `test.txt`. +After the pod is in the running state, run the following command to create a new file called `test.txt`. ```bash $ kubectl exec nginx-azuredisk -- touch /mnt/azuredisk/test.txt ``` -You can now validate that the disk is correctly mounted by running the following command and verifying you see the `test.txt` file in the output: +To validate the disk is correctly mounted, run the following command and verify you see the `test.txt` file in the output: ```console $ kubectl exec nginx-azuredisk -- ls /mnt/azuredisk @@ -74,14 +105,13 @@ test.txt ## Create a custom storage class -The default storage classes suit the most common scenarios, but not all. For some cases, you might want to have your own storage class customized with your own parameters. For example, we have a scenario where you might want to change the `volumeBindingMode` class. +The default storage classes are suitable for most common scenarios. For some cases, you might want to have your own storage class customized with your own parameters. For example, you might want to change the `volumeBindingMode` class. -You can use a `volumeBindingMode: Immediate` class that guarantees that occurs immediately once the PVC is created. In cases where your node pools are topology constrained, for example, using availability zones, PVs would be bound or provisioned without knowledge of the pod's scheduling requirements (in this case to be in a specific zone). +You can use a `volumeBindingMode: Immediate` class that guarantees it occurs immediately once the PVC is created. In cases where your node pools are topology constrained, for example when using availability zones, PVs would be bound or provisioned without knowledge of the pod's scheduling requirements (in this case to be in a specific zone). -To address this scenario, you can use `volumeBindingMode: WaitForFirstConsumer`, which delays the binding and provisioning of a PV until a pod that uses the PVC is created. In this way, the PV will conform and be provisioned in the availability zone (or other topology) that's specified by the pod's scheduling constraints. The default storage classes use `volumeBindingMode: WaitForFirstConsumer` class. +To address this scenario, you can use `volumeBindingMode: WaitForFirstConsumer`, which delays the binding and provisioning of a PV until a pod that uses the PVC is created. This way, the PV conforms and is provisioned in the availability zone (or other topology) that's specified by the pod's scheduling constraints. The default storage classes use `volumeBindingMode: WaitForFirstConsumer` class. -Create a file named `sc-azuredisk-csi-waitforfirstconsumer.yaml`, and paste the following manifest. -The storage class is the same as our `managed-csi` storage class but with a different `volumeBindingMode` class. +Create a file named `sc-azuredisk-csi-waitforfirstconsumer.yaml`, and then paste the following manifest. The storage class is the same as our `managed-csi` storage class, but with a different `volumeBindingMode` class. 
```yaml kind: StorageClass @@ -96,7 +126,7 @@ reclaimPolicy: Delete volumeBindingMode: WaitForFirstConsumer ``` -Create the storage class with the [kubectl apply][kubectl-apply] command, and specify your `sc-azuredisk-csi-waitforfirstconsumer.yaml` file: +Create the storage class by running the [kubectl apply][kubectl-apply] command and specify your `sc-azuredisk-csi-waitforfirstconsumer.yaml` file: ```console $ kubectl apply -f sc-azuredisk-csi-waitforfirstconsumer.yaml @@ -108,7 +138,15 @@ storageclass.storage.k8s.io/azuredisk-csi-waitforfirstconsumer created The Azure disk CSI driver supports creating [snapshots of persistent volumes](https://kubernetes-csi.github.io/docs/snapshot-restore-feature.html). As part of this capability, the driver can perform either *full* or [*incremental* snapshots](../virtual-machines/disks-incremental-snapshots.md) depending on the value set in the `incremental` parameter (by default, it's true). -For details on all the parameters, see [volume snapshot class parameters](https://github.com/kubernetes-sigs/azuredisk-csi-driver/blob/master/docs/driver-parameters.md#volumesnapshotclass). +The following table provides details for all of the parameters. + +|Name | Meaning | Available Value | Mandatory | Default value +|--- | --- | --- | --- | --- +|resourceGroup | Resource group for storing snapshot shots | EXISTING RESOURCE GROUP | No | If not specified, snapshot will be stored in the same resource group as source Azure disk +|incremental | Take [full or incremental snapshot](../virtual-machines/windows/incremental-snapshots.md) | `true`, `false` | No | `true` +|tags | azure disk [tags](../azure-resource-manager/management/tag-resources.md) | Tag format: 'key1=val1,key2=val2' | No | "" +|userAgent | User agent used for [customer usage attribution](../marketplace/azure-partner-customer-usage-attribution.md) | | No | Generated Useragent formatted `driverName/driverVersion compiler/version (OS-ARCH)` +|subscriptionID | Specify Azure subscription ID in which Azure disk will be created | Azure subscription ID | No | If not empty, `resourceGroup` must be provided, `incremental` must set as `false` ### Create a volume snapshot @@ -199,7 +237,7 @@ persistentvolumeclaim/pvc-azuredisk-cloning created pod/nginx-restored-cloning created ``` -We can now check the content of the cloned volume by running the following command and confirming we still see our `test.txt` created file. +You can verify the content of the cloned volume by running the following command and confirming the file `test.txt` is created. ```console $ kubectl exec nginx-restored-cloning -- ls /mnt/azuredisk @@ -216,7 +254,7 @@ You can request a larger volume for a PVC. Edit the PVC object, and specify a la > [!NOTE] > A new PV is never created to satisfy the claim. Instead, an existing volume is resized. -In AKS, the built-in `managed-csi` storage class already allows for expansion, so use the [PVC created earlier with this storage class](#dynamically-create-azure-disk-pvs-by-using-the-built-in-storage-classes). The PVC requested a 10-Gi persistent volume. We can confirm that by running: +In AKS, the built-in `managed-csi` storage class already supports expansion, so use the [PVC created earlier with this storage class](#dynamically-create-azure-disk-pvs-by-using-the-built-in-storage-classes). The PVC requested a 10-Gi persistent volume. 
You can confirm by running the following command: ```console $ kubectl exec -it nginx-azuredisk -- df -h /mnt/azuredisk @@ -226,11 +264,11 @@ Filesystem Size Used Avail Use% Mounted on ``` > [!IMPORTANT] -> Currently, Azure disk CSI driver supports resizing PVCs without downtime on specific regions. +> Azure disk CSI driver supports resizing PVCs without downtime in specific regions. > Follow this [link][expand-an-azure-managed-disk] to register the disk online resize feature. > If your cluster is not in the supported region list, you need to delete application first to detach disk on the node before expanding PVC. -Let's expand the PVC by increasing the `spec.resources.requests.storage` field: +Expand the PVC by increasing the `spec.resources.requests.storage` field running the following command: ```console $ kubectl patch pvc pvc-azuredisk --type merge --patch '{"spec": {"resources": {"requests": {"storage": "15Gi"}}}}' @@ -238,7 +276,7 @@ $ kubectl patch pvc pvc-azuredisk --type merge --patch '{"spec": {"resources": { persistentvolumeclaim/pvc-azuredisk patched ``` -Let's confirm the volume is now larger: +Run the following command to confirm the volume size has increased: ```console $ kubectl get pv @@ -248,7 +286,7 @@ pvc-391ea1a6-0191-4022-b915-c8dc4216174a 15Gi RWO Delete (...) ``` -And after a few minutes, confirm the size of the PVC and inside the pod: +And after a few minutes, run the following commands to confirm the size of the PVC and inside the pod: ```console $ kubectl get pvc pvc-azuredisk @@ -262,9 +300,9 @@ Filesystem Size Used Avail Use% Mounted on ## Windows containers -The Azure disk CSI driver also supports Windows nodes and containers. If you want to use Windows containers, follow the [Windows containers quickstart][aks-quickstart-cli] to add a Windows node pool. +The Azure disk CSI driver supports Windows nodes and containers. If you want to use Windows containers, follow the [Windows containers quickstart][aks-quickstart-cli] to add a Windows node pool. -After you have a Windows node pool, you can now use the built-in storage classes like `managed-csi`. You can deploy an example [Windows-based stateful set](https://github.com/kubernetes-sigs/azuredisk-csi-driver/blob/master/deploy/example/windows/statefulset.yaml) that saves timestamps into the file `data.txt` by deploying the following command with the [kubectl apply][kubectl-apply] command: +After you have a Windows node pool, you can now use the built-in storage classes like `managed-csi`. You can deploy an example [Windows-based stateful set](https://github.com/kubernetes-sigs/azuredisk-csi-driver/blob/master/deploy/example/windows/statefulset.yaml) that saves timestamps into the file `data.txt` by running the following [kubectl apply][kubectl-apply] command: ```console $ kubectl apply -f https://raw.githubusercontent.com/kubernetes-sigs/azuredisk-csi-driver/master/deploy/example/windows/statefulset.yaml @@ -272,7 +310,7 @@ $ kubectl apply -f https://raw.githubusercontent.com/kubernetes-sigs/azuredisk-c statefulset.apps/busybox-azuredisk created ``` -You can now validate the contents of the volume by running: +To validate the content of the volume, run the following command: ```console $ kubectl exec -it busybox-azuredisk-0 -- cat c:\\mnt\\azuredisk\\data.txt # on Linux/MacOS Bash @@ -286,7 +324,7 @@ $ kubectl exec -it busybox-azuredisk-0 -- cat c:\mnt\azuredisk\data.txt # on Win ## Next steps -- To learn how to use CSI drivers for Azure Files, see [Use Azure Files with CSI drivers](azure-files-csi.md). 
+- To learn how to use CSI driver for Azure Files, see [Use Azure Files with CSI driver](azure-files-csi.md). - For more information about storage best practices, see [Best practices for storage and backups in Azure Kubernetes Service][operator-best-practices-storage]. diff --git a/articles/aks/azure-disk-volume.md b/articles/aks/azure-disk-volume.md index 6eaa2145c456..806a56edda71 100644 --- a/articles/aks/azure-disk-volume.md +++ b/articles/aks/azure-disk-volume.md @@ -3,13 +3,13 @@ title: Create a static volume for pods in Azure Kubernetes Service (AKS) description: Learn how to manually create a volume with Azure disks for use with a pod in Azure Kubernetes Service (AKS) services: container-service ms.topic: article -ms.date: 05/09/2019 +ms.date: 05/17/2022 #Customer intent: As a developer, I want to learn how to manually create and attach storage to a specific pod in AKS. --- -# Manually create and use a volume with Azure disks in Azure Kubernetes Service (AKS) +# Create a static volume with Azure disks in Azure Kubernetes Service (AKS) Container-based applications often need to access and persist data in an external data volume. If a single pod needs access to storage, you can use Azure disks to present a native volume for application use. This article shows you how to manually create an Azure disk and attach it to a pod in AKS. @@ -22,129 +22,145 @@ For more information on Kubernetes volumes, see [Storage options for application This article assumes that you have an existing AKS cluster with 1.21 or later version. If you need an AKS cluster, see the AKS quickstart [using the Azure CLI][aks-quickstart-cli], [using Azure PowerShell][aks-quickstart-powershell], or [using the Azure portal][aks-quickstart-portal]. -If you want to interact with Azure Disks on an AKS cluster with 1.20 or previous version, see the [Kubernetes plugin for Azure Disks][kubernetes-disks]. +If you want to interact with Azure disks on an AKS cluster with 1.20 or previous version, see the [Kubernetes plugin for Azure disks][kubernetes-disks]. -## Create an Azure disk - -When you create an Azure disk for use with AKS, you can create the disk resource in the **node** resource group. This approach allows the AKS cluster to access and manage the disk resource. If you instead create the disk in a separate resource group, you must grant the Azure Kubernetes Service (AKS) managed identity for your cluster the `Contributor` role to the disk's resource group. - -For this article, create the disk in the node resource group. First, get the resource group name with the [az aks show][az-aks-show] command and add the `--query nodeResourceGroup` query parameter. 
The following example gets the node resource group for the AKS cluster name *myAKSCluster* in the resource group name *myResourceGroup*: +## Storage class static provisioning -```azurecli-interactive -$ az aks show --resource-group myResourceGroup --name myAKSCluster --query nodeResourceGroup -o tsv +The following table describes the Storage Class parameters for the Azure disk CSI driver static provisioning: -MC_myResourceGroup_myAKSCluster_eastus -``` +|Name | Meaning | Available Value | Mandatory | Default value| +|--- | --- | --- | --- | ---| +|volumeHandle| Azure disk URI | `/subscriptions/{sub-id}/resourcegroups/{group-name}/providers/microsoft.compute/disks/{disk-id}` | Yes | N/A| +|volumeAttributes.fsType | File system type | `ext4`, `ext3`, `ext2`, `xfs`, `btrfs` for Linux, `ntfs` for Windows | No | `ext4` for Linux, `ntfs` for Windows | +|volumeAttributes.partition | Partition number of the existing disk (only supported on Linux) | `1`, `2`, `3` | No | Empty (no partition)
    - Make sure partition format is like `-part1` | +|volumeAttributes.cachingMode | [Disk host cache setting](../virtual-machines/windows/premium-storage-performance.md#disk-caching)| `None`, `ReadOnly`, `ReadWrite` | No | `ReadOnly`| -Now create a disk using the [az disk create][az-disk-create] command. Specify the node resource group name obtained in the previous command, and then a name for the disk resource, such as *myAKSDisk*. The following example creates a *20*GiB disk, and outputs the ID of the disk once created. If you need to create a disk for use with Windows Server containers, add the `--os-type windows` parameter to correctly format the disk. - -```azurecli-interactive -az disk create \ - --resource-group MC_myResourceGroup_myAKSCluster_eastus \ - --name myAKSDisk \ - --size-gb 20 \ - --query id --output tsv -``` +## Create an Azure disk -> [!NOTE] -> Azure disks are billed by SKU for a specific size. These SKUs range from 32GiB for S4 or P4 disks to 32TiB for S80 or P80 disks (in preview). The throughput and IOPS performance of a Premium managed disk depends on both the SKU and the instance size of the nodes in the AKS cluster. See [Pricing and Performance of Managed Disks][managed-disk-pricing-performance]. - -The disk resource ID is displayed once the command has successfully completed, as shown in the following example output. This disk ID is used to mount the disk in the next step. - -```console -/subscriptions//resourceGroups/MC_myAKSCluster_myAKSCluster_eastus/providers/Microsoft.Compute/disks/myAKSDisk -``` - -## Mount disk as volume -Create a *pv-azuredisk.yaml* file with a *PersistentVolume*. Update `volumeHandle` with disk resource ID. For example: - -```yaml -apiVersion: v1 -kind: PersistentVolume -metadata: - name: pv-azuredisk -spec: - capacity: - storage: 20Gi - accessModes: - - ReadWriteOnce - persistentVolumeReclaimPolicy: Retain - storageClassName: managed-csi - csi: - driver: disk.csi.azure.com - readOnly: false - volumeHandle: /subscriptions//resourceGroups/MC_myAKSCluster_myAKSCluster_eastus/providers/Microsoft.Compute/disks/myAKSDisk - volumeAttributes: - fsType: ext4 -``` - -Create a *pvc-azuredisk.yaml* file with a *PersistentVolumeClaim* that uses the *PersistentVolume*. For example: - -```yaml -apiVersion: v1 -kind: PersistentVolumeClaim -metadata: - name: pvc-azuredisk -spec: - accessModes: - - ReadWriteOnce - resources: - requests: - storage: 20Gi - volumeName: pv-azuredisk - storageClassName: managed-csi -``` - -Use the `kubectl` commands to create the *PersistentVolume* and *PersistentVolumeClaim*. - -```console -kubectl apply -f pv-azuredisk.yaml -kubectl apply -f pvc-azuredisk.yaml -``` - -Verify your *PersistentVolumeClaim* is created and bound to the *PersistentVolume*. - -```console -$ kubectl get pvc pvc-azuredisk - -NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE -pvc-azuredisk Bound pv-azuredisk 20Gi RWO 5s -``` - -Create a *azure-disk-pod.yaml* file to reference your *PersistentVolumeClaim*. 
For example: - -```yaml -apiVersion: v1 -kind: Pod -metadata: - name: mypod -spec: - containers: - - image: mcr.microsoft.com/oss/nginx/nginx:1.15.5-alpine - name: mypod - resources: - requests: - cpu: 100m - memory: 128Mi - limits: - cpu: 250m - memory: 256Mi - volumeMounts: - - name: azure - mountPath: /mnt/azure - volumes: - - name: azure - persistentVolumeClaim: - claimName: pvc-azuredisk -``` - -```console -kubectl apply -f azure-disk-pod.yaml -``` +When you create an Azure disk for use with AKS, you can create the disk resource in the **node** resource group. This approach allows the AKS cluster to access and manage the disk resource. If instead you created the disk in a separate resource group, you must grant the Azure Kubernetes Service (AKS) managed identity for your cluster the `Contributor` role to the disk's resource group. In this exercise, you're going to create the disk in the same resource group as your cluster. + +1. Identify the resource group name using the [az aks show][az-aks-show] command and add the `--query nodeResourceGroup` parameter. The following example gets the node resource group for the AKS cluster name *myAKSCluster* in the resource group name *myResourceGroup*: + + ```azurecli-interactive + $ az aks show --resource-group myResourceGroup --name myAKSCluster --query nodeResourceGroup -o tsv + + MC_myResourceGroup_myAKSCluster_eastus + ``` + +2. Create a disk using the [az disk create][az-disk-create] command. Specify the node resource group name obtained in the previous command, and then a name for the disk resource, such as *myAKSDisk*. The following example creates a *20*GiB disk, and outputs the ID of the disk after it's created. If you need to create a disk for use with Windows Server containers, add the `--os-type windows` parameter to correctly format the disk. + + ```azurecli-interactive + az disk create \ + --resource-group MC_myResourceGroup_myAKSCluster_eastus \ + --name myAKSDisk \ + --size-gb 20 \ + --query id --output tsv + ``` + + > [!NOTE] + > Azure disks are billed by SKU for a specific size. These SKUs range from 32GiB for S4 or P4 disks to 32TiB for S80 or P80 disks (in preview). The throughput and IOPS performance of a Premium managed disk depends on both the SKU and the instance size of the nodes in the AKS cluster. See [Pricing and Performance of Managed Disks][managed-disk-pricing-performance]. + + The disk resource ID is displayed once the command has successfully completed, as shown in the following example output. This disk ID is used to mount the disk in the next section. + + ```console + /subscriptions//resourceGroups/MC_myAKSCluster_myAKSCluster_eastus/providers/Microsoft.Compute/disks/myAKSDisk + ``` + +## Mount disk as a volume + +1. Create a *pv-azuredisk.yaml* file with a *PersistentVolume*. Update `volumeHandle` with disk resource ID from the previous step. For example: + + ```yaml + apiVersion: v1 + kind: PersistentVolume + metadata: + name: pv-azuredisk + spec: + capacity: + storage: 20Gi + accessModes: + - ReadWriteOnce + persistentVolumeReclaimPolicy: Retain + storageClassName: managed-csi + csi: + driver: disk.csi.azure.com + readOnly: false + volumeHandle: /subscriptions//resourceGroups/MC_myAKSCluster_myAKSCluster_eastus/providers/Microsoft.Compute/disks/myAKSDisk + volumeAttributes: + fsType: ext4 + ``` + +2. Create a *pvc-azuredisk.yaml* file with a *PersistentVolumeClaim* that uses the *PersistentVolume*. 
For example: + + ```yaml + apiVersion: v1 + kind: PersistentVolumeClaim + metadata: + name: pvc-azuredisk + spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 20Gi + volumeName: pv-azuredisk + storageClassName: managed-csi + ``` + +3. Use the `kubectl` commands to create the *PersistentVolume* and *PersistentVolumeClaim*, referencing the two YAML files created earlier: + + ```console + kubectl apply -f pv-azuredisk.yaml + kubectl apply -f pvc-azuredisk.yaml + ``` + +4. To verify your *PersistentVolumeClaim* is created and bound to the *PersistentVolume*, run the +following command: + + ```console + $ kubectl get pvc pvc-azuredisk + + NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE + pvc-azuredisk Bound pv-azuredisk 20Gi RWO 5s + ``` + +5. Create a *azure-disk-pod.yaml* file to reference your *PersistentVolumeClaim*. For example: + + ```yaml + apiVersion: v1 + kind: Pod + metadata: + name: mypod + spec: + containers: + - image: mcr.microsoft.com/oss/nginx/nginx:1.15.5-alpine + name: mypod + resources: + requests: + cpu: 100m + memory: 128Mi + limits: + cpu: 250m + memory: 256Mi + volumeMounts: + - name: azure + mountPath: /mnt/azure + volumes: + - name: azure + persistentVolumeClaim: + claimName: pvc-azuredisk + ``` + +6. Run the following command to apply the configuration and mount the volume, referencing the YAML +configuration file created in the previous steps: + + ```console + kubectl apply -f azure-disk-pod.yaml + ``` ## Next steps -For associated best practices, see [Best practices for storage and backups in AKS][operator-best-practices-storage]. +To learn about our recommended storage and backup practices, see [Best practices for storage and backups in AKS][operator-best-practices-storage]. [kubernetes-disks]: https://github.com/kubernetes/examples/blob/master/staging/volumes/azure_disk/README.md diff --git a/articles/aks/configure-azure-cni.md b/articles/aks/configure-azure-cni.md index 76a851450e9e..caa16de9348e 100644 --- a/articles/aks/configure-azure-cni.md +++ b/articles/aks/configure-azure-cni.md @@ -285,12 +285,6 @@ The following questions and answers apply to the **Azure CNI network configurati The entire cluster should use only one type of CNI. -## AKS Engine - -[Azure Kubernetes Service Engine (AKS Engine)][aks-engine] is an open-source project that generates Azure Resource Manager templates you can use for deploying Kubernetes clusters on Azure. - -Kubernetes clusters created with AKS Engine support both the [kubenet][kubenet] and [Azure CNI][cni-networking] plugins. As such, both networking scenarios are supported by AKS Engine. 
- ## Next steps Learn more about networking in AKS in the following articles: @@ -308,7 +302,6 @@ Learn more about networking in AKS in the following articles: [portal-01-networking-advanced]: ./media/networking-overview/portal-01-networking-advanced.png -[aks-engine]: https://github.com/Azure/aks-engine [services]: https://kubernetes.io/docs/concepts/services-networking/service/ [portal]: https://portal.azure.com [cni-networking]: https://github.com/Azure/azure-container-networking/blob/master/docs/cni.md diff --git a/articles/aks/csi-secrets-store-driver.md b/articles/aks/csi-secrets-store-driver.md index 40e720b94487..875219151f17 100644 --- a/articles/aks/csi-secrets-store-driver.md +++ b/articles/aks/csi-secrets-store-driver.md @@ -164,14 +164,14 @@ az aks disable-addons --addons azure-keyvault-secrets-provider -g myResourceGrou > When the Azure Key Vault Provider for Secrets Store CSI Driver is enabled, it updates the pod mount and the Kubernetes secret that's defined in the `secretObjects` field of `SecretProviderClass`. It does so by polling for changes periodically, based on the rotation poll interval you've defined. The default rotation poll interval is 2 minutes. >[!NOTE] -> When the secret/key is updated in external secrets store after the initial pod deployment, the updated secret will be periodically updated in the pod mount and the Kubernetes Secret. +> When a secret is updated in an external secrets store after initial pod deployment, the Kubernetes Secret and the pod mount will be periodically updated depending on how the application consumes the secret data. > -> Depending on how the application consumes the secret data: +> **Mount the Kubernetes Secret as a volume**: Use the auto rotation and Sync K8s secrets features of Secrets Store CSI Driver. The application will need to watch for changes from the mounted Kubernetes Secret volume. When the Kubernetes Secret is updated by the CSI Driver, the corresponding volume contents are automatically updated. > -> 1. Mount Kubernetes secret as a volume: Use auto rotation feature + Sync K8s secrets feature in Secrets Store CSI Driver, application will need to watch for changes from the mounted Kubernetes Secret volume. When the Kubernetes Secret is updated by the CSI Driver, the corresponding volume contents are automatically updated. -> 2. Application reads the data from container’s filesystem: Use rotation feature in Secrets Store CSI Driver, application will need to watch for the file change from the volume mounted by the CSI driver. -> 3. Using Kubernetes secret for environment variable: The pod needs to be restarted to get the latest secret as environment variable. -> Use something like https://github.com/stakater/Reloader to watch for changes on the synced Kubernetes secret and do rolling upgrades on pods +> **Application reads the data from the container’s filesystem**: Use the rotation feature of Secrets Store CSI Driver. The application will need to watch for the file change from the volume mounted by the CSI driver. +> +> **Use the Kubernetes Secret for an environment variable**: Restart the pod to get the latest secret as an environment variable. +> Use a tool such as [Reloader][reloader] to watch for changes on the synced Kubernetes Secret and perform rolling upgrades on pods. 
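As an illustration of the first pattern above (consuming the synced Kubernetes Secret as a volume), a pod spec along these lines could be used. The `SecretProviderClass` name `azure-kvname`, the secret name `foosecret`, and the mount paths are assumptions for the sketch and would correspond to whatever you defined in the `secretObjects` field of your `SecretProviderClass`.

```yaml
apiVersion: v1
kind: Pod
metadata:
  name: app-with-synced-secret
spec:
  containers:
    - name: app
      image: mcr.microsoft.com/oss/nginx/nginx:1.15.5-alpine
      volumeMounts:
        - name: secrets-store              # CSI mount that triggers creation and sync of the Kubernetes Secret
          mountPath: /mnt/secrets-store
          readOnly: true
        - name: synced-secret              # the synced Kubernetes Secret mounted as a regular secret volume
          mountPath: /mnt/synced
          readOnly: true
  volumes:
    - name: secrets-store
      csi:
        driver: secrets-store.csi.k8s.io
        readOnly: true
        volumeAttributes:
          secretProviderClass: azure-kvname   # assumed SecretProviderClass name
    - name: synced-secret
      secret:
        secretName: foosecret                 # assumed name defined under secretObjects
```

As noted above, the application still needs to watch the mounted files for changes after the driver refreshes the synced Secret.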
To enable autorotation of secrets, use the `enable-secret-rotation` flag when you create your cluster: @@ -332,3 +332,5 @@ Now that you've learned how to use the Azure Key Vault Provider for Secrets Stor [kube-csi]: https://kubernetes-csi.github.io/docs/ [key-vault-provider-install]: https://azure.github.io/secrets-store-csi-driver-provider-azure/getting-started/installation [sample-secret-provider-class]: https://azure.github.io/secrets-store-csi-driver-provider-azure/getting-started/usage/#create-your-own-secretproviderclass-object +[reloader]: https://github.com/stakater/Reloader + diff --git a/articles/aks/csi-storage-drivers.md b/articles/aks/csi-storage-drivers.md index dbac1b4b5841..5a011bd9d431 100644 --- a/articles/aks/csi-storage-drivers.md +++ b/articles/aks/csi-storage-drivers.md @@ -1,14 +1,14 @@ --- -title: Enable Container Storage Interface (CSI) drivers on Azure Kubernetes Service (AKS) +title: Container Storage Interface (CSI) drivers in Azure Kubernetes Service (AKS) description: Learn how to enable the Container Storage Interface (CSI) drivers for Azure disks and Azure Files in an Azure Kubernetes Service (AKS) cluster. services: container-service ms.topic: article -ms.date: 05/06/2022 +ms.date: 05/23/2022 author: palma21 --- -# Enable Container Storage Interface (CSI) drivers on Azure Kubernetes Service (AKS) +# Container Storage Interface (CSI) drivers in Azure Kubernetes Service (AKS) The Container Storage Interface (CSI) is a standard for exposing arbitrary block and file storage systems to containerized workloads on Kubernetes. By adopting and using CSI, Azure Kubernetes Service (AKS) can write, deploy, and iterate plug-ins to expose new or improve existing storage systems in Kubernetes without having to touch the core Kubernetes code and wait for its release cycles. @@ -22,6 +22,9 @@ The CSI storage driver support on AKS allows you to natively use: > > *In-tree drivers* refers to the current storage drivers that are part of the core Kubernetes code opposed to the new CSI drivers, which are plug-ins. +> [!NOTE] +> Azure disk CSI driver v2 (preview) improves scalability and reduces pod failover latency. It uses shared disks to provision attachment replicas on multiple cluster nodes and integrates with the pod scheduler to ensure a node with an attachment replica is chosen on pod failover. Azure disk CSI driver v2 (preview) also provides the ability to fine tune performance. If you're interested in participating in the preview, submit a request: [https://aka.ms/DiskCSIv2Preview](https://aka.ms/DiskCSIv2Preview). This preview version is provided without a service level agreement, and you can occasionally expect breaking changes while in preview. The preview version isn't recommended for production workloads. For more information, see [Supplemental Terms of Use for Microsoft Azure Previews](https://azure.microsoft.com/support/legal/preview-supplemental-terms/). + ## Migrate custom in-tree storage classes to CSI If you created in-tree driver storage classes, those storage classes continue to work since CSI migration is turned on after upgrading your cluster to 1.21.x. If you want to use CSI features you'll need to perform the migration. @@ -61,7 +64,7 @@ parameters: ## Migrate in-tree persistent volumes > [!IMPORTANT] -> If your in-tree persistent volume `reclaimPolicy` is set to **Delete**, you need to change its policy to **Retain** to persist your data. 
This can be achieved using a [patch operation on the PV](https://kubernetes.io/docs/tasks/administer-cluster/change-pv-reclaim-policy/). For example: +> If your in-tree persistent volume `reclaimPolicy` is set to **Delete**, you need to change its policy to **Retain** to persist your data. This can be achieved using a [patch operation on the PV](https://kubernetes.io/docs/tasks/administer-cluster/change-pv-reclaim-policy/). For example: > > ```console > $ kubectl patch pv pv-azuredisk --type merge --patch '{"spec": {"persistentVolumeReclaimPolicy": "Retain"}}' @@ -93,7 +96,7 @@ If you have in-tree Azure File persistent volumes, get `secretName`, `shareName` [azure-disk-volume]: azure-disk-volume.md -[azure-disk-static-mount]: azure-disk-volume.md#mount-disk-as-volume +[azure-disk-static-mount]: azure-disk-volume.md#mount-disk-as-a-volume [azure-file-static-mount]: azure-files-volume.md#mount-file-share-as-a-persistent-volume [azure-files-pvc]: azure-files-dynamic-pv.md [premium-storage]: ../virtual-machines/disks-types.md diff --git a/articles/aks/dapr.md b/articles/aks/dapr.md index 3b349f0ddfad..7684a049bec4 100644 --- a/articles/aks/dapr.md +++ b/articles/aks/dapr.md @@ -59,7 +59,7 @@ Global Azure cloud is supported with Arc support on the regions listed by [Azure ## Prerequisites - If you don't have an Azure subscription, create a [free account](https://azure.microsoft.com/free/?WT.mc_id=A261C142F) before you begin. -- Install the latest version of the [Azure CLI](/cli/azure/install-azure-cli-windows). +- Install the latest version of the [Azure CLI][install-cli]. - If you don't have one already, you need to create an [AKS cluster][deploy-cluster] or connect an [Arc-enabled Kubernetes cluster][arc-k8s-cluster]. ### Set up the Azure CLI extension for cluster extensions @@ -262,8 +262,9 @@ az k8s-extension delete --resource-group myResourceGroup --cluster-name myAKSClu [az-provider-register]: /cli/azure/provider#az-provider-register [sample-application]: ./quickstart-dapr.md [k8s-version-support-policy]: ./supported-kubernetes-versions.md?tabs=azure-cli#kubernetes-version-support-policy -[arc-k8s-cluster]: /azure-arc/kubernetes/quickstart-connect-cluster.md +[arc-k8s-cluster]: ../azure-arc/kubernetes/quickstart-connect-cluster.md [update-extension]: ./cluster-extensions.md#update-extension-instance +[install-cli]: /cli/azure/install-azure-cli [kubernetes-production]: https://docs.dapr.io/operations/hosting/kubernetes/kubernetes-production @@ -275,4 +276,4 @@ az k8s-extension delete --resource-group myResourceGroup --cluster-name myAKSClu [dapr-oss-support]: https://docs.dapr.io/operations/support/support-release-policy/ [dapr-supported-version]: https://docs.dapr.io/operations/support/support-release-policy/#supported-versions [dapr-troubleshooting]: https://docs.dapr.io/operations/troubleshooting/common_issues/ -[supported-cloud-regions]: https://azure.microsoft.com/global-infrastructure/services/?products=azure-arc +[supported-cloud-regions]: https://azure.microsoft.com/global-infrastructure/services/?products=azure-arc \ No newline at end of file diff --git a/articles/aks/draft.md b/articles/aks/draft.md index 00891e646d7e..c50a8b78eaa8 100644 --- a/articles/aks/draft.md +++ b/articles/aks/draft.md @@ -28,30 +28,16 @@ Draft has the following commands to help ease your development on Kubernetes: - Install the latest version of the [Azure CLI](/cli/azure/install-azure-cli-windows) and the *aks-preview* extension. 
- If you don't have one already, you need to create an [AKS cluster][deploy-cluster]. -### Install the `AKS-Draft` extension preview +### Install the `aks-preview` Azure CLI extension [!INCLUDE [preview features callout](./includes/preview/preview-callout.md)] -To create an AKS cluster that can use the Draft extension, you must enable the `AKS-ExtensionManager` and `AKS-Draft` feature flags on your subscription. - -Register the `AKS-ExtensionManager` and `AKS-Draft` feature flags by using the [az feature register][az-feature-register] command, as shown in the following example: - ```azurecli-interactive -az extension add --name draft -``` - -### Set up the Azure CLI extension for cluster extensions +# Install the aks-preview extension +az extension add --name aks-preview -You'll also need the `k8s-extension` Azure CLI extension, which can be installed by running the following command: - -```azurecli-interactive -az extension add --name k8s-extension -``` - -If the `k8s-extension` extension is already installed, you can update it to the latest version using the following command: - -```azurecli-interactive -az extension update --name k8s-extension +# Update the extension to make sure you have the latest version installed +az extension update --name aks-preview ``` ## Create artifacts using `draft create` @@ -104,12 +90,20 @@ You can also run the command on a specific directory using the `--destination` f az aks draft up --destination /Workspaces/ContosoAir ``` -## Delete the extension +## Use Web Application Routing with Draft to make your application accessible over the internet + +[Web Application Routing][web-app-routing] is the easiest way to get your web application up and running in Kubernetes securely, removing the complexity of ingress controllers and certificate and DNS management while offering configuration for enterprises looking to bring their own. Web Application Routing offers a managed ingress controller based on nginx that you can use without restrictions and integrates out of the box with Open Service Mesh to secure intra-cluster communications. -To delete the extension and remove Draft from your AKS cluster, you can use the following command: +To set up Draft with Web Application Routing, use `az aks draft update` and pass in the DNS name and Azure Key Vault-stored certificate when prompted: + +```azure-cli-interactive +az aks draft update +``` + +You can also run the command on a specific directory using the `--destination` flag: ```azure-cli-interactive -az k8s-extension delete --resource-group myResourceGroup --cluster-name myAKSCluster --cluster-type managedClusters --name draft +az aks draft update --destination /Workspaces/ContosoAir ``` @@ -120,3 +114,5 @@ az k8s-extension delete --resource-group myResourceGroup --cluster-name myAKSClu [sample-application]: ./quickstart-dapr.md [k8s-version-support-policy]: ./supported-kubernetes-versions.md?tabs=azure-cli#kubernetes-version-support-policy [web-app-routing]: web-app-routing.md +[az-extension-add]: /cli/azure/extension#az-extension-add +[az-extension-update]: /cli/azure/extension#az-extension-update diff --git a/articles/aks/integrations.md b/articles/aks/integrations.md index ce4624e2b162..a6c3d24324bf 100644 --- a/articles/aks/integrations.md +++ b/articles/aks/integrations.md @@ -26,6 +26,7 @@ The below table shows the available add-ons. | ingress-appgw | Use Application Gateway Ingress Controller with your AKS cluster. 
| [What is Application Gateway Ingress Controller?][agic] | | open-service-mesh | Use Open Service Mesh with your AKS cluster. | [Open Service Mesh AKS add-on][osm] | | azure-keyvault-secrets-provider | Use Azure Keyvault Secrets Provider addon.| [Use the Azure Key Vault Provider for Secrets Store CSI Driver in an AKS cluster][keyvault-secret-provider] | +| web_application_routing | Use a managed NGINX ingress Controller with your AKS cluster.| [Web Application Routing Overview][web-app-routing] | ## Extensions diff --git a/articles/aks/internal-lb.md b/articles/aks/internal-lb.md index 68ca708437c9..466d95151f45 100644 --- a/articles/aks/internal-lb.md +++ b/articles/aks/internal-lb.md @@ -143,7 +143,6 @@ Learn more about Kubernetes services at the [Kubernetes services documentation][ [kubectl-apply]: https://kubernetes.io/docs/reference/generated/kubectl/kubectl-commands#apply [kubernetes-services]: https://kubernetes.io/docs/concepts/services-networking/service/ -[aks-engine]: https://github.com/Azure/aks-engine [advanced-networking]: configure-azure-cni.md diff --git a/articles/aks/intro-kubernetes.md b/articles/aks/intro-kubernetes.md index d5600d4d2648..f40ffb5308c2 100644 --- a/articles/aks/intro-kubernetes.md +++ b/articles/aks/intro-kubernetes.md @@ -149,7 +149,6 @@ Learn more about deploying and managing AKS with the Azure CLI Quickstart. > [Deploy an AKS Cluster using Azure CLI][aks-quickstart-cli] -[aks-engine]: https://github.com/Azure/aks-engine [kubectl-overview]: https://kubernetes.io/docs/user-guide/kubectl-overview/ [compliance-doc]: https://azure.microsoft.com/overview/trusted-cloud/compliance/ diff --git a/articles/aks/load-balancer-standard.md b/articles/aks/load-balancer-standard.md index d05184f25308..1e36ee7f04bf 100644 --- a/articles/aks/load-balancer-standard.md +++ b/articles/aks/load-balancer-standard.md @@ -378,7 +378,6 @@ Learn more about using Internal Load Balancer for Inbound traffic at the [AKS In [kubectl-get]: https://kubernetes.io/docs/reference/generated/kubectl/kubectl-commands#get [kubectl-apply]: https://kubernetes.io/docs/reference/generated/kubectl/kubectl-commands#apply [kubernetes-services]: https://kubernetes.io/docs/concepts/services-networking/service/ -[aks-engine]: https://github.com/Azure/aks-engine [advanced-networking]: configure-azure-cni.md diff --git a/articles/aks/monitor-aks.md b/articles/aks/monitor-aks.md index 7cf77bdf7b79..9ffa5bdf7061 100644 --- a/articles/aks/monitor-aks.md +++ b/articles/aks/monitor-aks.md @@ -41,7 +41,7 @@ You require at least one Log Analytics workspace to support Container insights a If you're just getting started with Azure Monitor, then start with a single workspace and consider creating additional workspaces as your requirements evolve. Many environments will use a single workspace for all the Azure resources they monitor. You can even share a workspace used by [Microsoft Defender for Cloud and Microsoft Sentinel](../azure-monitor/vm/monitor-virtual-machine-security.md), although many customers choose to segregate their availability and performance telemetry from security data. -See [Designing your Azure Monitor Logs deployment](../azure-monitor/logs/design-logs-deployment.md) for details on logic that you should consider for designing a workspace configuration. +See [Designing your Azure Monitor Logs deployment](../azure-monitor/logs/workspace-design.md) for details on logic that you should consider for designing a workspace configuration. 
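If you're starting with that single workspace, a minimal sketch of creating one with the Azure CLI follows (the resource group, workspace name, and location are assumptions):

```azurecli
az monitor log-analytics workspace create \
    --resource-group myResourceGroup \
    --workspace-name myAKSWorkspace \
    --location eastus
```

You can then reference this workspace when you enable Container insights in the next step.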
### Enable container insights When you enable Container insights for your AKS cluster, it deploys a containerized version of the [Log Analytics agent](../agents/../azure-monitor/agents/log-analytics-agent.md) that sends data to Azure Monitor. There are multiple methods to enable it depending whether you're working with a new or existing AKS cluster. See [Enable Container insights](../azure-monitor/containers/container-insights-onboard.md) for prerequisites and configuration options. diff --git a/articles/aks/nat-gateway.md b/articles/aks/nat-gateway.md index 75d5ff05ea8f..272557a1e1c4 100644 --- a/articles/aks/nat-gateway.md +++ b/articles/aks/nat-gateway.md @@ -1,5 +1,5 @@ --- -title: Managed NAT Gateway (preview) +title: Managed NAT Gateway description: Learn how to create an AKS cluster with managed NAT integration services: container-service ms.topic: article @@ -7,7 +7,7 @@ ms.date: 10/26/2021 ms.author: juda --- -# Managed NAT Gateway (preview) +# Managed NAT Gateway Whilst AKS customers are able to route egress traffic through an Azure Load Balancer, there are limitations on the amount of outbound flows of traffic that is possible. @@ -15,48 +15,14 @@ Azure NAT Gateway allows up to 64,000 outbound UDP and TCP traffic flows per IP This article will show you how to create an AKS cluster with a Managed NAT Gateway for egress traffic. -[!INCLUDE [preview features callout](./includes/preview/preview-callout.md)] ## Before you begin To use Managed NAT gateway, you must have the following: * The latest version of the Azure CLI -* The `aks-preview` extension version 0.5.31 or later * Kubernetes version 1.20.x or above -### Install aks-preview CLI extension - -You also need the *aks-preview* Azure CLI extension version 0.5.31 or later. Install the *aks-preview* Azure CLI extension by using the [az extension add][az-extension-add] command. Or install any available updates by using the [az extension update][az-extension-update] command. - -```azurecli-interactive -# Install the aks-preview extension -az extension add --name aks-preview - -# Update the extension to make sure you have the latest version installed -az extension update --name aks-preview -``` - -### Register the `AKS-NATGatewayPreview` feature flag - -To use the NAT Gateway feature, you must enable the `AKS-NATGatewayPreview` feature flag on your subscription. - -```azurecli -az feature register --namespace "Microsoft.ContainerService" --name "AKS-NATGatewayPreview" -``` -You can check on the registration status by using the [az feature list][az-feature-list] command: - -```azurecli-interactive -az feature list -o table --query "[?contains(name, 'Microsoft.ContainerService/AKS-NATGatewayPreview')].{Name:name,State:properties.state}" -``` - -When ready, refresh the registration of the *Microsoft.ContainerService* resource provider by using the [az provider register][az-provider-register] command: - -```azurecli-interactive -az provider register --namespace Microsoft.ContainerService -``` - - ## Create an AKS cluster with a Managed NAT Gateway To create an AKS cluster with a new Managed NAT Gateway, use `--outbound-type managedNATGateway` as well as `--nat-gateway-managed-outbound-ip-count` and `--nat-gateway-idle-timeout` when running `az aks create`. The following example creates a *myresourcegroup* resource group, then creates a *natcluster* AKS cluster in *myresourcegroup* with a Managed NAT Gateway, two outbound IPs, and an idle timeout of 30 seconds. 
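A sketch of those commands, with the location as an assumption:

```azurecli-interactive
# Create a resource group (the location is an assumption)
az group create --name myresourcegroup --location eastus

# Create an AKS cluster that routes egress through a managed NAT gateway
az aks create \
    --resource-group myresourcegroup \
    --name natcluster \
    --outbound-type managedNATGateway \
    --nat-gateway-managed-outbound-ip-count 2 \
    --nat-gateway-idle-timeout 30
```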
diff --git a/articles/aks/node-auto-repair.md b/articles/aks/node-auto-repair.md index c0d47f242d35..932ee79b7ecb 100644 --- a/articles/aks/node-auto-repair.md +++ b/articles/aks/node-auto-repair.md @@ -35,7 +35,7 @@ If AKS identifies an unhealthy node that remains unhealthy for 10 minutes, AKS t 1. Reboot the node. 1. If the reboot is unsuccessful, reimage the node. -1. If the reimage is unsuccessful, redploy the node. +1. If the reimage is unsuccessful, redeploy the node. Alternative remediations are investigated by AKS engineers if auto-repair is unsuccessful. diff --git a/articles/aks/quickstart-dapr.md b/articles/aks/quickstart-dapr.md index c3d032a3148c..fe931410894e 100644 --- a/articles/aks/quickstart-dapr.md +++ b/articles/aks/quickstart-dapr.md @@ -1,6 +1,6 @@ --- -title: Deploy an application with the Dapr cluster extension for Azure Kubernetes Service (AKS) -description: Use the Dapr cluster extension for Azure Kubernetes Service (AKS) to deploy an application +title: Deploy an application with the Dapr cluster extension for Azure Kubernetes Service (AKS) or Arc-enabled Kubernetes +description: Use the Dapr cluster extension for Azure Kubernetes Service (AKS) or Arc-enabled Kubernetes to deploy an application author: nickomang ms.author: nickoman ms.service: container-service @@ -9,15 +9,15 @@ ms.date: 05/03/2022 ms.custom: template-quickstart, mode-other, event-tier1-build-2022 --- -# Quickstart: Deploy an application using the Dapr cluster extension for Azure Kubernetes Service (AKS) +# Quickstart: Deploy an application using the Dapr cluster extension for Azure Kubernetes Service (AKS) or Arc-enabled Kubernetes -In this quickstart, you will get familiar with using the [Dapr cluster extension][dapr-overview] in an AKS cluster. You will be deploying a hello world example, consisting of a Python application that generates messages and a Node application that consumes and persists them. +In this quickstart, you will get familiar with using the [Dapr cluster extension][dapr-overview] in an AKS or Arc-enabled Kubernetes cluster. You will be deploying a hello world example, consisting of a Python application that generates messages and a Node application that consumes and persists them. ## Prerequisites * An Azure subscription. If you don't have an Azure subscription, you can create a [free account](https://azure.microsoft.com/free). * [Azure CLI installed](/cli/azure/install-azure-cli). -* An AKS cluster with the [Dapr cluster extension][dapr-overview] enabled +* An AKS or Arc-enabled Kubernetes cluster with the [Dapr cluster extension][dapr-overview] enabled ## Clone the repository @@ -201,7 +201,7 @@ You should see the latest JSON in the response. ## Clean up resources -Use the [az group delete][az-group-delete] command to remove the resource group, the AKS cluster, namespace, and all related resources. +Use the [az group delete][az-group-delete] command to remove the resource group, the cluster, the namespace, and all related resources. ```azurecli-interactive az group delete --name MyResourceGroup diff --git a/articles/aks/support-policies.md b/articles/aks/support-policies.md index 0e2b03064b5d..d0e154fd2407 100644 --- a/articles/aks/support-policies.md +++ b/articles/aks/support-policies.md @@ -33,7 +33,7 @@ Microsoft manages and monitors the following components through the control pane AKS isn't a Platform-as-a-Service (PaaS) solution. Some components, such as agent nodes, have *shared responsibility*, where users must help maintain the AKS cluster. 
User input is required, for example, to apply an agent node operating system (OS) security patch. -The services are *managed* in the sense that Microsoft and the AKS team deploys, operates, and is responsible for service availability and functionality. Customers can't alter these managed components. Microsoft limits customization to ensure a consistent and scalable user experience. For a fully customizable solution, see [AKS Engine](https://github.com/Azure/aks-engine). +The services are *managed* in the sense that Microsoft and the AKS team deploys, operates, and is responsible for service availability and functionality. Customers can't alter these managed components. Microsoft limits customization to ensure a consistent and scalable user experience. ## Shared responsibility diff --git a/articles/aks/supported-kubernetes-versions.md b/articles/aks/supported-kubernetes-versions.md index ef02286b9b26..7c4bb4eab8c1 100644 --- a/articles/aks/supported-kubernetes-versions.md +++ b/articles/aks/supported-kubernetes-versions.md @@ -264,7 +264,6 @@ Patches have a two month minimum lifecycle. To keep up to date when new patches For information on how to upgrade your cluster, see [Upgrade an Azure Kubernetes Service (AKS) cluster][aks-upgrade]. -[aks-engine]: https://github.com/Azure/aks-engine [azure-update-channel]: https://azure.microsoft.com/updates/?product=kubernetes-service diff --git a/articles/aks/use-managed-identity.md b/articles/aks/use-managed-identity.md index 1cb37f7a47ba..c57179e93438 100644 --- a/articles/aks/use-managed-identity.md +++ b/articles/aks/use-managed-identity.md @@ -215,6 +215,9 @@ A successful cluster creation using your own managed identities contains this us A Kubelet identity enables access to be granted to the existing identity prior to cluster creation. This feature enables scenarios such as connection to ACR with a pre-created managed identity. +> [!WARNING] +> Updating kubelet MI will upgrade Nodepool, which causes downtime for your AKS cluster as the nodes in the nodepools will be cordoned/drained and then reimaged. + ### Prerequisites - You must have the Azure CLI, version 2.26.0 or later installed. diff --git a/articles/aks/use-multiple-node-pools.md b/articles/aks/use-multiple-node-pools.md index 8b42fbe22960..fb1088c55f53 100644 --- a/articles/aks/use-multiple-node-pools.md +++ b/articles/aks/use-multiple-node-pools.md @@ -181,7 +181,7 @@ A workload may require splitting a cluster's nodes into separate pools for logic * All subnets assigned to node pools must belong to the same virtual network. * System pods must have access to all nodes/pods in the cluster to provide critical functionality such as DNS resolution and tunneling kubectl logs/exec/port-forward proxy. -* If you expand your VNET after creating the cluster you must update your cluster (perform any managed cluster operation but node pool operations don't count) before adding a subnet outside the original cidr. AKS will error out on the agent pool add now though we originally allowed it. If you don't know how to reconcile your cluster file a support ticket. +* If you expand your VNET after creating the cluster you must update your cluster (perform any managed cluster operation but node pool operations don't count) before adding a subnet outside the original cidr. AKS will error out on the agent pool add now though we originally allowed it. The `aks-preview` Azure CLI extension (version 0.5.66+) now supports running `az aks update -g -n ` without any optional arguments. 
This command will perform an update operation without making any changes, which can recover a cluster stuck in a failed state. * In clusters with Kubernetes version < 1.23.3, kube-proxy will SNAT traffic from new subnets, which can cause Azure Network Policy to drop the packets. * Windows nodes will SNAT traffic to the new subnets until the node pool is reimaged. * Internal load balancers default to one of the node pool subnets (usually the first subnet of the node pool at cluster creation). To override this behavior, you can [specify the load balancer's subnet explicitly using an annotation][internal-lb-different-subnet]. diff --git a/articles/aks/use-network-policies.md b/articles/aks/use-network-policies.md index a81d4d4e5ada..46236fe55142 100644 --- a/articles/aks/use-network-policies.md +++ b/articles/aks/use-network-policies.md @@ -4,7 +4,7 @@ titleSuffix: Azure Kubernetes Service description: Learn how to secure traffic that flows in and out of pods by using Kubernetes network policies in Azure Kubernetes Service (AKS) services: container-service ms.topic: article -ms.date: 03/16/2021 +ms.date: 03/29/2022 --- @@ -100,7 +100,7 @@ az network vnet create \ --subnet-prefix 10.240.0.0/16 # Create a service principal and read in the application ID -SP=$(az ad sp create-for-rbac --role Contributor --output json) +SP=$(az ad sp create-for-rbac --output json) SP_ID=$(echo $SP | jq -r .appId) SP_PASSWORD=$(echo $SP | jq -r .password) @@ -239,7 +239,13 @@ kubectl run backend --image=mcr.microsoft.com/oss/nginx/nginx:1.15.5-alpine --la Create another pod and attach a terminal session to test that you can successfully reach the default NGINX webpage: ```console -kubectl run --rm -it --image=mcr.microsoft.com/aks/fundamental/base-ubuntu:v0.0.11 network-policy --namespace development +kubectl run --rm -it --image=mcr.microsoft.com/dotnet/runtime-deps:6.0 network-policy --namespace development +``` + +Install `wget`: + +```console +apt-get update && apt-get install -y wget ``` At the shell prompt, use `wget` to confirm that you can access the default NGINX webpage: @@ -295,7 +301,13 @@ kubectl apply -f backend-policy.yaml Let's see if you can use the NGINX webpage on the back-end pod again. Create another test pod and attach a terminal session: ```console -kubectl run --rm -it --image=mcr.microsoft.com/aks/fundamental/base-ubuntu:v0.0.11 network-policy --namespace development +kubectl run --rm -it --image=mcr.microsoft.com/dotnet/runtime-deps:6.0 network-policy --namespace development +``` + +Install `wget`: + +```console +apt-get update && apt-get install -y wget ``` At the shell prompt, use `wget` to see if you can access the default NGINX webpage. This time, set a timeout value to *2* seconds. 
The network policy now blocks all inbound traffic, so the page can't be loaded, as shown in the following example: @@ -352,7 +364,13 @@ kubectl apply -f backend-policy.yaml Schedule a pod that is labeled as *app=webapp,role=frontend* and attach a terminal session: ```console -kubectl run --rm -it frontend --image=mcr.microsoft.com/aks/fundamental/base-ubuntu:v0.0.11 --labels app=webapp,role=frontend --namespace development +kubectl run --rm -it frontend --image=mcr.microsoft.com/dotnet/runtime-deps:6.0 --labels app=webapp,role=frontend --namespace development +``` + +Install `wget`: + +```console +apt-get update && apt-get install -y wget ``` At the shell prompt, use `wget` to see if you can access the default NGINX webpage: @@ -382,7 +400,13 @@ exit The network policy allows traffic from pods labeled *app: webapp,role: frontend*, but should deny all other traffic. Let's test to see whether another pod without those labels can access the back-end NGINX pod. Create another test pod and attach a terminal session: ```console -kubectl run --rm -it --image=mcr.microsoft.com/aks/fundamental/base-ubuntu:v0.0.11 network-policy --namespace development +kubectl run --rm -it --image=mcr.microsoft.com/dotnet/runtime-deps:6.0 network-policy --namespace development +``` + +Install `wget`: + +```console +apt-get update && apt-get install -y wget ``` At the shell prompt, use `wget` to see if you can access the default NGINX webpage. The network policy blocks the inbound traffic, so the page can't be loaded, as shown in the following example: @@ -415,7 +439,13 @@ kubectl label namespace/production purpose=production Schedule a test pod in the *production* namespace that is labeled as *app=webapp,role=frontend*. Attach a terminal session: ```console -kubectl run --rm -it frontend --image=mcr.microsoft.com/aks/fundamental/base-ubuntu:v0.0.11 --labels app=webapp,role=frontend --namespace production +kubectl run --rm -it frontend --image=mcr.microsoft.com/dotnet/runtime-deps:6.0 --labels app=webapp,role=frontend --namespace production +``` + +Install `wget`: + +```console +apt-get update && apt-get install -y wget ``` At the shell prompt, use `wget` to confirm that you can access the default NGINX webpage: @@ -479,7 +509,13 @@ kubectl apply -f backend-policy.yaml Schedule another pod in the *production* namespace and attach a terminal session: ```console -kubectl run --rm -it frontend --image=mcr.microsoft.com/aks/fundamental/base-ubuntu:v0.0.11 --labels app=webapp,role=frontend --namespace production +kubectl run --rm -it frontend --image=mcr.microsoft.com/dotnet/runtime-deps:6.0 --labels app=webapp,role=frontend --namespace production +``` + +Install `wget`: + +```console +apt-get update && apt-get install -y wget ``` At the shell prompt, use `wget` to see that the network policy now denies traffic: @@ -501,7 +537,13 @@ exit With traffic denied from the *production* namespace, schedule a test pod back in the *development* namespace and attach a terminal session: ```console -kubectl run --rm -it frontend --image=mcr.microsoft.com/aks/fundamental/base-ubuntu:v0.0.11 --labels app=webapp,role=frontend --namespace development +kubectl run --rm -it frontend --image=mcr.microsoft.com/dotnet/runtime-deps:6.0 --labels app=webapp,role=frontend --namespace development +``` + +Install `wget`: + +```console +apt-get update && apt-get install -y wget ``` At the shell prompt, use `wget` to see that the network policy allows the traffic: diff --git a/articles/aks/virtual-nodes-cli.md b/articles/aks/virtual-nodes-cli.md 
index 5b10f6dc021c..19544f279df3 100644 --- a/articles/aks/virtual-nodes-cli.md +++ b/articles/aks/virtual-nodes-cli.md @@ -241,7 +241,7 @@ The pod is assigned an internal IP address from the Azure virtual network subnet To test the pod running on the virtual node, browse to the demo application with a web client. As the pod is assigned an internal IP address, you can quickly test this connectivity from another pod on the AKS cluster. Create a test pod and attach a terminal session to it: ```console -kubectl run -it --rm testvk --image=mcr.microsoft.com/aks/fundamental/base-ubuntu:v0.0.11 +kubectl run -it --rm testvk --image=mcr.microsoft.com/dotnet/runtime-deps:6.0 ``` Install `curl` in the pod using `apt-get`: @@ -299,13 +299,19 @@ AKS_SUBNET=myVirtualNodeSubnet NODE_RES_GROUP=$(az aks show --resource-group $RES_GROUP --name $AKS_CLUSTER --query nodeResourceGroup --output tsv) # Get network profile ID -NETWORK_PROFILE_ID=$(az network profile list --resource-group $NODE_RES_GROUP --query [0].id --output tsv) +NETWORK_PROFILE_ID=$(az network profile list --resource-group $NODE_RES_GROUP --query "[0].id" --output tsv) # Delete the network profile az network profile delete --id $NETWORK_PROFILE_ID -y +# Grab the service association link ID +SAL_ID=$(az network vnet subnet show --resource-group $RES_GROUP --vnet-name $AKS_VNET --name $AKS_SUBNET --query id --output tsv)/providers/Microsoft.ContainerInstance/serviceAssociationLinks/default + +# Delete the service association link for the subnet +az resource delete --ids $SAL_ID --api-version {api-version} + # Delete the subnet delegation to Azure Container Instances -az network vnet subnet update --resource-group $RES_GROUP --vnet-name $AKS_VNET --name $AKS_SUBNET --remove delegations 0 +az network vnet subnet update --resource-group $RES_GROUP --vnet-name $AKS_VNET --name $AKS_SUBNET --remove delegations ``` ## Next steps diff --git a/articles/aks/virtual-nodes-portal.md b/articles/aks/virtual-nodes-portal.md index c3b14f443999..154be1d4293e 100644 --- a/articles/aks/virtual-nodes-portal.md +++ b/articles/aks/virtual-nodes-portal.md @@ -153,7 +153,7 @@ The pod is assigned an internal IP address from the Azure virtual network subnet To test the pod running on the virtual node, browse to the demo application with a web client. As the pod is assigned an internal IP address, you can quickly test this connectivity from another pod on the AKS cluster. Create a test pod and attach a terminal session to it: ```console -kubectl run -it --rm virtual-node-test --image=mcr.microsoft.com/aks/fundamental/base-ubuntu:v0.0.11 +kubectl run -it --rm virtual-node-test --image=mcr.microsoft.com/dotnet/runtime-deps:6.0 ``` Install `curl` in the pod using `apt-get`: diff --git a/articles/aks/web-app-routing.md b/articles/aks/web-app-routing.md new file mode 100644 index 000000000000..0dce3c5d4ec3 --- /dev/null +++ b/articles/aks/web-app-routing.md @@ -0,0 +1,252 @@ +--- +title: Web Application Routing add-on on Azure Kubernetes Service (AKS) (Preview) +description: Use the Web Application Routing add-on to securely access applications deployed on Azure Kubernetes Service (AKS). +services: container-service +author: jahabibi +ms.topic: article +ms.date: 05/13/2021 +ms.author: jahabibi +--- + +# Web Application Routing (Preview) + +The Web Application Routing solution makes it easy to access applications that are deployed to your Azure Kubernetes Service (AKS) cluster. 
When the solution's enabled, it configures an [Ingress controller](https://kubernetes.io/docs/concepts/services-networking/ingress-controllers/) in your AKS cluster, SSL termination, and Open Service Mesh (OSM) for E2E encryption of inter cluster communication. As applications are deployed, the solution also creates publicly accessible DNS names for application endpoints. + +[!INCLUDE [preview features callout](./includes/preview/preview-callout.md)] + +## Limitations + +- Web Application Routing currently doesn't support named ports in ingress backend. + +## Web Application Routing solution overview + +The add-on deploys four components: an [nginx ingress controller][nginx], [Secrets Store CSI Driver][csi-driver], [Open Service Mesh (OSM)][osm], and [External-DNS][external-dns] controller. + +- **Nginx ingress Controller**: The ingress controller exposed to the internet. +- **External-dns**: Watches for Kubernetes Ingress resources and creates DNS A records in the cluster-specific DNS zone. +- **CSI driver**: Connector used to communicate with keyvault to retrieve SSL certificates for ingress controller. +- **OSM**: A lightweight, extensible, cloud native service mesh that allows users to uniformly manage, secure, and get out-of-the-box observability features for highly dynamic microservice environments. +- **External-DNS controller**: Watches for Kubernetes Ingress resources and creates DNS A records in the cluster-specific DNS zone. + +## Prerequisites + +- An Azure subscription. If you don't have an Azure subscription, you can create a [free account](https://azure.microsoft.com/free). +- [Azure CLI installed](/cli/azure/install-azure-cli). + +### Install the `aks-preview` Azure CLI extension + +You also need the *aks-preview* Azure CLI extension version `0.5.75` or later. Install the *aks-preview* Azure CLI extension by using the [az extension add][az-extension-add] command. Or install any available updates by using the [az extension update][az-extension-update] command. + +```azurecli-interactive +# Install the aks-preview extension +az extension add --name aks-preview + +# Update the extension to make sure you have the latest version installed +az extension update --name aks-preview +``` + +### Install the `osm` CLI + +Since Web Application Routing uses OSM internally to secure intranet communication, we need to set up the `osm` CLI. This command-line tool contains everything needed to install and configure Open Service Mesh. The binary is available on the [OSM GitHub releases page][osm-release]. + +## Deploy Web Application Routing with the Azure CLI + +The Web Application Routing routing add-on can be enabled with the Azure CLI when deploying an AKS cluster. To do so, use the [az aks create][az-aks-create] command with the `--enable-addons` argument. + +```azurecli +az aks create --resource-group myResourceGroup --name myAKSCluster --enable-addons web_application_routing +``` + +> [!TIP] +> If you want to enable multiple add-ons, provide them as a comma-separated list. For example, to enable Web Application Routing routing and monitoring, use the format `--enable-addons web_application_routing,monitoring`. + +You can also enable Web Application Routing on an existing AKS cluster using the [az aks enable-addons][az-aks-enable-addons] command. 
To enable Web Application Routing on an existing cluster, add the `--addons` parameter and specify *web_application_routing* as shown in the following example: + +```azurecli +az aks enable-addons --resource-group myResourceGroup --name myAKSCluster --addons web_application_routing +``` + +After the cluster is deployed or updated, use the [az aks show][az-aks-show] command to retrieve the DNS zone name. + +## Connect to your AKS cluster + +To connect to the Kubernetes cluster from your local computer, you use [kubectl][kubectl], the Kubernetes command-line client. + +If you use the Azure Cloud Shell, `kubectl` is already installed. You can also install it locally using the `az aks install-cli` command: + +```azurecli +az aks install-cli +``` + +To configure `kubectl` to connect to your Kubernetes cluster, use the [az aks get-credentials][az-aks-get-credentials] command. The following example gets credentials for the AKS cluster named *MyAKSCluster* in the *MyResourceGroup*: + +```azurecli +az aks get-credentials --resource-group MyResourceGroup --name MyAKSCluster +``` + +## Create the application namespace + +For the sample application environment, let's first create a namespace called `hello-web-app-routing` to run the example pods: + +```bash +kubectl create namespace hello-web-app-routing +``` + +We also need to add the application namespace to the OSM control plane: + +```bash +osm namespace add hello-web-app-routing +``` + +## Grant permissions for Web Application Routing + +Identify the Web Application Routing-associated managed identity within the cluster resource group `webapprouting-`. In this walkthrough, the identity is named `webapprouting-myakscluster`. + +:::image type="content" source="media/web-app-routing/identify-msi-web-app-routing.png" alt-text="Cluster resource group in the Azure portal is shown, and the webapprouting-myakscluster user-assigned managed identity is highlighted." lightbox="media/web-app-routing/identify-msi-web-app-routing.png"::: + +Copy the identity's object ID: + +:::image type="content" source="media/web-app-routing/msi-web-app-object-id.png" alt-text="The webapprouting-myakscluster managed identity screen in Azure portal, the identity's object ID is highlighted. " lightbox="media/web-app-routing/msi-web-app-object-id.png"::: + +### Grant access to Azure Key Vault + +Grant `GET` permissions for Web Application Routing to retrieve certificates from Azure Key Vault: + +```azurecli +az keyvault set-policy --name myapp-contoso --object-id --secret-permissions get --certificate-permissions get +``` + +## Use Web Application Routing + +The Web Application Routing solution may only be triggered on service resources that are annotated as follows: + +```yaml +annotations: + kubernetes.azure.com/ingress-host: myapp.contoso.com + kubernetes.azure.com/tls-cert-keyvault-uri: myapp-contoso.vault.azure.net +``` + +These annotations in the service manifest would direct Web Application Routing to create an ingress servicing `myapp.contoso.com` connected to the keyvault `myapp-contoso`. + +Create a file named **samples-web-app-routing.yaml** and copy in the following YAML. On line 29-31, update `` and `` with the DNS zone name collected in the previous step of this article. 
+ +```yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: aks-helloworld +spec: + replicas: 1 + selector: + matchLabels: + app: aks-helloworld + template: + metadata: + labels: + app: aks-helloworld + spec: + containers: + - name: aks-helloworld + image: mcr.microsoft.com/azuredocs/aks-helloworld:v1 + ports: + - containerPort: 80 + env: + - name: TITLE + value: "Welcome to Azure Kubernetes Service (AKS)" +--- +apiVersion: v1 +kind: Service +metadata: + name: aks-helloworld +annotations: + kubernetes.azure.com/ingress-host: + kubernetes.azure.com/tls-cert-keyvault-uri: +spec: + type: ClusterIP + ports: + - port: 80 + selector: + app: aks-helloworld +``` + +Use the [kubectl apply][kubectl-apply] command to create the resources. + +```bash +kubectl apply -f samples-web-app-routing.yaml -n hello-web-app-routing +``` + +The following example shows the created resources: + +```bash +$ kubectl apply -f samples-web-app-routing.yaml -n hello-web-app-routing + +deployment.apps/aks-helloworld created +service/aks-helloworld created +``` + +## Verify the managed ingress was created + +```bash +$ kubectl get ingress -n hello-web-app-routing -n hello-web-app-routing +``` + +Open a web browser to **, for example *myapp.contoso.com* and verify you see the demo application. The application may take a few minutes to appear. + +## Remove Web Application Routing + +First, remove the associated namespace: + +```bash +kubectl delete namespace hello-web-app-routing +``` + +The Web Application Routing add-on can be removed using the Azure CLI. To do so run the following command, substituting your AKS cluster and resource group name. + +```azurecli +az aks disable-addons --addons web_application_routing --name myAKSCluster --resource-group myResourceGroup --no-wait +``` + +When the Web Application Routing add-on is disabled, some Kubernetes resources may remain in the cluster. These resources include *configMaps* and *secrets*, and are created in the *app-routing-system* namespace. To maintain a clean cluster, you may want to remove these resources. + +Look for *addon-web-application-routing* resources using the following [kubectl get][kubectl-get] commands: + +## Clean up + +Remove the associated Kubernetes objects created in this article using `kubectl delete`. + +```bash +kubectl delete -f samples-web-app-routing.yaml +``` + +The example output shows Kubernetes objects have been removed. 
+ +```bash +$ kubectl delete -f samples-web-app-routing.yaml + +deployment "aks-helloworld" deleted +service "aks-helloworld" deleted +``` + + +[az-aks-create]: /cli/azure/aks#az-aks-create +[az-aks-show]: /cli/azure/aks#az-aks-show +[ingress-https]: ./ingress-tls.md +[az-aks-enable-addons]: /cli/azure/aks#az-aks-enable-addons +[az-aks-install-cli]: /cli/azure/aks#az-aks-install-cli +[az-aks-get-credentials]: /cli/azure/aks#az-aks-get-credentials +[csi-driver]: https://github.com/Azure/secrets-store-csi-driver-provider-azure +[az-extension-add]: /cli/azure/extension#az-extension-add +[az-extension-update]: /cli/azure/extension#az-extension-update + + +[osm-release]: https://github.com/openservicemesh/osm/releases/ +[nginx]: https://kubernetes.github.io/ingress-nginx/ +[osm]: https://openservicemesh.io/ +[external-dns]: https://github.com/kubernetes-incubator/external-dns +[kubectl]: https://kubernetes.io/docs/user-guide/kubectl/ +[kubectl-apply]: https://kubernetes.io/docs/reference/generated/kubectl/kubectl-commands#apply +[kubectl-get]: https://kubernetes.io/docs/reference/generated/kubectl/kubectl-commands#get +[kubectl-delete]: https://kubernetes.io/docs/reference/generated/kubectl/kubectl-commands#delete +[kubectl-logs]: https://kubernetes.io/docs/reference/generated/kubectl/kubectl-commands#logs +[ingress]: https://kubernetes.io/docs/concepts/services-networking/ingress/ +[ingress-resource]: https://kubernetes.io/docs/concepts/services-networking/ingress/#the-ingress-resource \ No newline at end of file diff --git a/articles/aks/windows-faq.md b/articles/aks/windows-faq.md index 40d713ed4dda..b3e170d8c588 100644 --- a/articles/aks/windows-faq.md +++ b/articles/aks/windows-faq.md @@ -195,17 +195,12 @@ Use the following configuration: 1. In your Kubernetes service configuration, set **externalTrafficPolicy=Local**. This ensures that the Kubernetes service directs traffic only to pods within the local node. 1. In your Kubernetes service configuration, set **sessionAffinity: ClientIP**. This ensures that the Azure Load Balancer gets configured with session affinity. -## What if I need a feature that's not supported? - -If you encounter feature gaps, the open-source [aks-engine][aks-engine] project provides an easy and fully customizable way of running Kubernetes in Azure, including Windows support. For more information, see [AKS roadmap][aks-roadmap]. - ## Next steps To get started with Windows Server containers in AKS, see [Create a node pool that runs Windows Server in AKS][windows-node-cli]. 
[kubernetes]: https://kubernetes.io -[aks-engine]: https://github.com/azure/aks-engine [upstream-limitations]: https://kubernetes.io/docs/setup/production-environment/windows/intro-windows-in-kubernetes/#supported-functionality-and-limitations [intro-windows]: https://kubernetes.io/docs/setup/production-environment/windows/intro-windows-in-kubernetes/ [aks-roadmap]: https://github.com/Azure/AKS/projects/1 diff --git a/articles/api-management/api-management-access-restriction-policies.md b/articles/api-management/api-management-access-restriction-policies.md index 9f78d28b8d4d..42774b1bfd97 100644 --- a/articles/api-management/api-management-access-restriction-policies.md +++ b/articles/api-management/api-management-access-restriction-policies.md @@ -280,6 +280,9 @@ This policy can be used in the following policy [sections](./api-management-howt - **Policy sections:** inbound - **Policy scopes:** all scopes +> [!NOTE] +> If you configure this policy at more than one scope, IP filtering is applied in the order of [policy evaluation](set-edit-policies.md#use-base-element-to-set-policy-evaluation-order) in your policy definition. + ## Set usage quota by subscription The `quota` policy enforces a renewable or lifetime call volume and/or bandwidth quota, on a per subscription basis. diff --git a/articles/api-management/api-management-advanced-policies.md b/articles/api-management/api-management-advanced-policies.md index e29a6ef18032..e8e579e4286d 100644 --- a/articles/api-management/api-management-advanced-policies.md +++ b/articles/api-management/api-management-advanced-policies.md @@ -713,7 +713,7 @@ This sample policy shows an example of using the `send-one-way-request` policy t - https://hooks.slack.com/services/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 + https://hooks.slack.com/services/T00000000/B00000000/XXXXXXXXXXXXXXXXXXXXXXXX POST @{ return new JObject( diff --git a/articles/api-management/api-management-howto-aad.md b/articles/api-management/api-management-howto-aad.md index e67741f224a0..cfd7eda37cf9 100644 --- a/articles/api-management/api-management-howto-aad.md +++ b/articles/api-management/api-management-howto-aad.md @@ -1,18 +1,12 @@ --- -title: Authorize developer accounts by using Azure Active Directory +title: Authorize access to API Management developer portal by using Azure AD titleSuffix: Azure API Management -description: Learn how to authorize users by using Azure Active Directory in API Management. -services: api-management -documentationcenter: API Management -author: dlepow -manager: cfowler -editor: '' +description: Learn how to enable user sign-in to the API Management developer portal by using Azure Active Directory. +author: dlepow ms.service: api-management -ms.workload: mobile -ms.tgt_pltfrm: na ms.topic: article -ms.date: 09/20/2021 +ms.date: 05/20/2022 ms.author: danlep --- @@ -27,19 +21,39 @@ In this article, you'll learn how to: - Complete the [Create an Azure API Management instance](get-started-create-service-instance.md) quickstart. -- [Import and publish](import-and-publish.md) an Azure API Management instance. +- [Import and publish](import-and-publish.md) an API in the Azure API Management instance. 
[!INCLUDE [azure-cli-prepare-your-environment-no-header.md](../../includes/azure-cli-prepare-your-environment-no-header.md)] [!INCLUDE [premium-dev-standard.md](../../includes/api-management-availability-premium-dev-standard.md)] -## Authorize developer accounts by using Azure AD +[!INCLUDE [api-management-navigate-to-instance.md](../../includes/api-management-navigate-to-instance.md)] + + +## Enable user sign-in using Azure AD - portal + +To simplify the configuration, API Management can automatically enable an Azure AD application and identity provider for users of the developer portal. Alternatively, you can manually enable the Azure AD application and identity provider. + +### Automatically enable Azure AD application and identity provider + +1. In the left menu of your API Management instance, under **Developer portal**, select **Portal overview**. +1. On the **Portal overview** page, scroll down to **Enable user sign-in with Azure Active Directory**. +1. Select **Enable Azure AD**. +1. On the **Enable Azure AD** page, select **Enable Azure AD**. +1. Select **Close**. -1. Sign in to the [Azure portal](https://portal.azure.com). -1. Select ![Arrow icon.](./media/api-management-howto-aad/arrow.png). -1. Search for and select **API Management services**. -1. Select your API Management service instance. -1. Under **Developer portal**, select **Identities**. + :::image type="content" source="media/api-management-howto-aad/enable-azure-ad-portal.png" alt-text="Screenshot of enabling Azure AD in the developer portal overview page."::: + +After the Azure AD provider is enabled: + +* Users in the specified Azure AD instance can [sign into the developer portal by using an Azure AD account](#log_in_to_dev_portal). +* You can manage the Azure AD configuration on the **Developer portal** > **Identities** page in the portal. +* Optionally configure other sign-in settings by selecting **Identities** > **Settings**. For example, you might want to redirect anonymous users to the sign-in page. +* Republish the developer portal after any configuration change. + +### Manually enable Azure AD application and identity provider + +1. In the left menu of your API Management instance, under **Developer portal**, select **Identities**. 1. Select **+Add** from the top to open the **Add identity provider** pane to the right. 1. Under **Type**, select **Azure Active Directory** from the drop-down menu. * Once selected, you'll be able to enter other necessary information. @@ -47,7 +61,7 @@ In this article, you'll learn how to: * See more information about these controls later in the article. 1. Save the **Redirect URL** for later. - :::image type="content" source="media/api-management-howto-aad/api-management-with-aad001.png" alt-text="Add identity provider in Azure portal"::: + :::image type="content" source="media/api-management-howto-aad/api-management-with-aad001.png" alt-text="Screenshot of adding identity provider in Azure portal."::: > [!NOTE] > There are two redirect URLs:
    @@ -60,9 +74,9 @@ In this article, you'll learn how to: 1. Navigate to [App registrations](https://go.microsoft.com/fwlink/?linkid=2083908) to register an app in Active Directory. 1. Select **New registration**. On the **Register an application** page, set the values as follows: - * Set **Name** to a meaningful name. e.g., *developer-portal* + * Set **Name** to a meaningful name such as *developer-portal* * Set **Supported account types** to **Accounts in this organizational directory only**. - * Set **Redirect URI** to the value you saved from step 9. + * In **Redirect URI**, select **Web** and paste the redirect URL you saved from a previous step. * Select **Register**. 1. After you've registered the application, copy the **Application (client) ID** from the **Overview** page. @@ -77,14 +91,19 @@ In this article, you'll learn how to: * Choose **Add**. 1. Copy the client **Secret value** before leaving the page. You will need it later. 1. Under **Manage** in the side menu, select **Authentication**. -1. Under the **Implicit grant and hybrid flows** sections, select the **ID tokens** checkbox. + 1. Under the **Implicit grant and hybrid flows** section, select the **ID tokens** checkbox. + 1. Select **Save**. +1. Under **Manage** in the side menu, select **Token configuration** > **+ Add optional claim**. + 1. In **Token type**, select **ID**. + 1. Select (check) the following claims: **email**, **family_name**, **given_name**. + 1. Select **Add**. If prompted, select **Turn on the Microsoft Graph email, profile permission**. 1. Switch to the browser tab with your API Management instance. 1. Paste the secret into the **Client secret** field in the **Add identity provider** pane. > [!IMPORTANT] > Update the **Client secret** before the key expires. -1. In the **Add identity provider** pane's **Allowed Tenants** field, specify the Azure AD instances' domains to which you want to grant access to the API Management service instance APIs. +1. In the **Add identity provider** pane's **Allowed tenants** field, specify the Azure AD instance's domains to which you want to grant access to the API Management service instance APIs. * You can separate multiple domains with newlines, spaces, or commas. > [!NOTE] @@ -93,9 +112,15 @@ In this article, you'll learn how to: > 1. Enter the domain name of the Azure AD tenant to which they want to grant access. > 1. Select **Submit**. -1. After you specify the desired configuration, select **Add**. +1. After you specify the desired configuration, select **Add**. +1. Republish the developer portal for the Azure AD configuration to take effect. In the left menu, under **Developer portal**, select **Portal overview** > **Publish**. -Once changes are saved, users in the specified Azure AD instance can [sign into the developer portal by using an Azure AD account](#log_in_to_dev_portal). +After the Azure AD provider is enabled: + +* Users in the specified Azure AD instance can [sign into the developer portal by using an Azure AD account](#log_in_to_dev_portal). +* You can manage the Azure AD configuration on the **Developer portal** > **Identities** page in the portal. +* Optionally configure other sign-in settings by selecting **Identities** > **Settings**. For example, you might want to redirect anonymous users to the sign-in page. +* Republish the developer portal after any configuration change. 
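If you prefer to script the registration rather than use the portal steps above, a minimal sketch with the Azure CLI (the display name is an assumption, and you still need to configure the redirect URI, ID tokens, and optional claims as described):

```azurecli
# Create the app registration and capture the application (client) ID for the identity provider
CLIENT_ID=$(az ad app create --display-name developer-portal --query appId --output tsv)
echo $CLIENT_ID
```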
## Add an external Azure AD group @@ -120,20 +145,20 @@ Follow these steps to grant: az rest --method PATCH --uri "https://graph.microsoft.com/v1.0/$($tenantId)/applications/$($appObjectID)" --body "{'requiredResourceAccess':[{'resourceAccess': [{'id': 'e1fe6dd8-ba31-4d61-89e7-88639da4683d','type': 'Scope'},{'id': '7ab1d382-f21e-4acd-a863-ba3e13f7da61','type': 'Role'}],'resourceAppId': '00000003-0000-0000-c000-000000000000'}]}" ``` -2. Log out and log back in to the Azure portal. -3. Navigate to the App Registration page for the application you registered in [the previous section](#authorize-developer-accounts-by-using-azure-ad). -4. Click **API Permissions**. You should see the permissions granted by the Azure CLI script in step 1. -5. Select **Grant admin consent for {tenantname}** so that you grant access for all users in this directory. +1. Sign out and sign back in to the Azure portal. +1. Navigate to the App Registration page for the application you registered in [the previous section](#enable-user-sign-in-using-azure-ad---portal). +1. Select **API Permissions**. You should see the permissions granted by the Azure CLI script in step 1. +1. Select **Grant admin consent for {tenantname}** so that you grant access for all users in this directory. Now you can add external Azure AD groups from the **Groups** tab of your API Management instance. 1. Under **Developer portal** in the side menu, select **Groups**. -2. Select the **Add Azure AD group** button. +1. Select the **Add Azure AD group** button. - !["Add A A D group" button](./media/api-management-howto-aad/api-management-with-aad008.png) + !["Screenshot showing Add Azure AD group button.](./media/api-management-howto-aad/api-management-with-aad008.png) 1. Select the **Tenant** from the drop-down. -2. Search for and select the group that you want to add. -3. Press the **Select** button. +1. Search for and select the group that you want to add. +1. Press the **Select** button. Once you add an external Azure AD group, you can review and configure its properties: 1. Select the name of the group from the **Groups** tab. @@ -144,12 +169,15 @@ Users from the configured Azure AD instance can now: * View and subscribe to any groups for which they have visibility. > [!NOTE] -> Learn more about the difference between **Delegated** and **Application** permissions types in [Permissions and consent in the Microsoft identity platform](../active-directory/develop/v2-permissions-and-consent.md#permission-types) article. +> Learn more about the difference between **Delegated** and **Application** permissions types in [Permissions and consent in the Microsoft identity platform](../active-directory/develop/v2-permissions-and-consent.md#permission-types) article. ## Developer portal: Add Azure AD account authentication In the developer portal, you can sign in with Azure AD using the **Sign-in button: OAuth** widget included on the sign-in page of the default developer portal content. +:::image type="content" source="media/api-management-howto-aad/developer-portal-azure-ad-signin.png" alt-text="Screenshot showing OAuth widget in developer portal."::: + + Although a new account will automatically be created when a new user signs in with Azure AD, consider adding the same widget to the sign-up page. The **Sign-up form: OAuth** widget represents a form used for signing up with OAuth. 
> [!IMPORTANT] diff --git a/articles/api-management/api-management-howto-disaster-recovery-backup-restore.md b/articles/api-management/api-management-howto-disaster-recovery-backup-restore.md index 8696f4d6f57f..ea1ded555981 100644 --- a/articles/api-management/api-management-howto-disaster-recovery-backup-restore.md +++ b/articles/api-management/api-management-howto-disaster-recovery-backup-restore.md @@ -78,7 +78,7 @@ All of the tasks that you do on resources using the Azure Resource Manager must Before calling the APIs that generate the backup and restore, you need to get a token. The following example uses the [Microsoft.IdentityModel.Clients.ActiveDirectory](https://www.nuget.org/packages/Microsoft.IdentityModel.Clients.ActiveDirectory) NuGet package to retrieve the token. > [!IMPORTANT] -> The [Microsoft.IdentityModel.Clients.ActiveDirectory](https://www.nuget.org/packages/Microsoft.IdentityModel.Clients.ActiveDirectory) NuGet package and Azure AD Authentication Library (ADAL) have been deprecated. No new features have been added since June 30, 2020. We strongly encourage you to upgrade, see the [migration guide](/azure/active-directory/develop/msal-migration) for more details. +> The [Microsoft.IdentityModel.Clients.ActiveDirectory](https://www.nuget.org/packages/Microsoft.IdentityModel.Clients.ActiveDirectory) NuGet package and Azure AD Authentication Library (ADAL) have been deprecated. No new features have been added since June 30, 2020. We strongly encourage you to upgrade, see the [migration guide](../active-directory/develop/msal-migration.md) for more details. ```csharp using Microsoft.IdentityModel.Clients.ActiveDirectory; @@ -331,4 +331,4 @@ API Management **Premium** tier also supports [zone redundancy](zone-redundancy. [api-management-arm-token]: ./media/api-management-howto-disaster-recovery-backup-restore/api-management-arm-token.png [api-management-endpoint]: ./media/api-management-howto-disaster-recovery-backup-restore/api-management-endpoint.png [control-plane-ip-address]: virtual-network-reference.md#control-plane-ip-addresses -[azure-storage-ip-firewall]: ../storage/common/storage-network-security.md#grant-access-from-an-internet-ip-range +[azure-storage-ip-firewall]: ../storage/common/storage-network-security.md#grant-access-from-an-internet-ip-range \ No newline at end of file diff --git a/articles/api-management/api-management-policy-expressions.md b/articles/api-management/api-management-policy-expressions.md index 7e52ec847bdf..d08095c8f56d 100644 --- a/articles/api-management/api-management-policy-expressions.md +++ b/articles/api-management/api-management-policy-expressions.md @@ -199,7 +199,7 @@ The `context` variable is implicitly available in every policy [expression](api- |----------------------|-------------------------------------------------------| |context|[Api](#ref-context-api): [IApi](#ref-iapi)

<br /><br /> [Deployment](#ref-context-deployment)<br /><br /> Elapsed: TimeSpan - time interval between the value of Timestamp and current time<br /><br /> [LastError](#ref-context-lasterror)<br /><br /> [Operation](#ref-context-operation)<br /><br /> [Product](#ref-context-product)<br /><br /> [Request](#ref-context-request)<br /><br /> RequestId: Guid - unique request identifier<br /><br /> [Response](#ref-context-response)<br /><br /> [Subscription](#ref-context-subscription)<br /><br /> Timestamp: DateTime - point in time when request was received<br /><br /> Tracing: bool - indicates if tracing is on or off<br /><br /> [User](#ref-context-user)<br /><br /> [Variables](#ref-context-variables): IReadOnlyDictionary<br /><br /> void Trace(message: string)|
|context.Api|Id: string<br /><br /> IsCurrentRevision: bool<br /><br /> Name: string<br /><br /> Path: string<br /><br /> Revision: string<br /><br /> ServiceUrl: [IUrl](#ref-iurl)<br /><br /> Version: string |
-|context.Deployment|GatewayId: string (returns 'managed' for managed gateways)<br /><br /> Region: string<br /><br /> ServiceName: string<br /><br /> Certificates: IReadOnlyDictionary|
+|context.Deployment|GatewayId: string (returns 'managed' for managed gateways)<br /><br /> Region: string<br /><br /> ServiceId: string<br /><br /> ServiceName: string<br /><br /> Certificates: IReadOnlyDictionary|
|context.LastError|Source: string<br /><br /> Reason: string<br /><br /> Message: string<br /><br /> Scope: string<br /><br /> Section: string<br /><br /> Path: string<br /><br /> PolicyId: string<br /><br /> For more information about context.LastError, see [Error handling](api-management-error-handling-policies.md).|
|context.Operation|Id: string<br /><br /> Method: string<br /><br /> Name: string<br /><br /> UrlTemplate: string|
|context.Product|Apis: IEnumerable<[IApi](#ref-iapi)\><br /><br /> ApprovalRequired: bool<br /><br /> Groups: IEnumerable<[IGroup](#ref-igroup)\><br /><br /> Id: string<br /><br /> Name: string<br /><br /> State: enum ProductState {NotPublished, Published}<br /><br /> SubscriptionLimit: int?<br /><br />
    SubscriptionRequired: bool| diff --git a/articles/api-management/media/api-management-howto-aad/arrow.png b/articles/api-management/media/api-management-howto-aad/arrow.png deleted file mode 100644 index c0df8c5523d4..000000000000 Binary files a/articles/api-management/media/api-management-howto-aad/arrow.png and /dev/null differ diff --git a/articles/api-management/media/api-management-howto-aad/developer-portal-azure-ad-signin.png b/articles/api-management/media/api-management-howto-aad/developer-portal-azure-ad-signin.png new file mode 100644 index 000000000000..2b1a4a1e0ff9 Binary files /dev/null and b/articles/api-management/media/api-management-howto-aad/developer-portal-azure-ad-signin.png differ diff --git a/articles/api-management/media/api-management-howto-aad/enable-azure-ad-portal.png b/articles/api-management/media/api-management-howto-aad/enable-azure-ad-portal.png new file mode 100644 index 000000000000..1a528de39d19 Binary files /dev/null and b/articles/api-management/media/api-management-howto-aad/enable-azure-ad-portal.png differ diff --git a/articles/api-management/media/private-endpoint/api-management-private-endpoint.png b/articles/api-management/media/private-endpoint/api-management-private-endpoint.png new file mode 100644 index 000000000000..9c622add9361 Binary files /dev/null and b/articles/api-management/media/private-endpoint/api-management-private-endpoint.png differ diff --git a/articles/api-management/media/virtual-network-concepts/api-management-application-gateway.png b/articles/api-management/media/virtual-network-concepts/api-management-application-gateway.png new file mode 100644 index 000000000000..a912186f7b3f Binary files /dev/null and b/articles/api-management/media/virtual-network-concepts/api-management-application-gateway.png differ diff --git a/articles/api-management/media/virtual-network-concepts/api-management-private-endpoint.png b/articles/api-management/media/virtual-network-concepts/api-management-private-endpoint.png new file mode 100644 index 000000000000..9c622add9361 Binary files /dev/null and b/articles/api-management/media/virtual-network-concepts/api-management-private-endpoint.png differ diff --git a/articles/api-management/policy-fragments.md b/articles/api-management/policy-fragments.md index ecda6bc7e089..8fc24760d696 100644 --- a/articles/api-management/policy-fragments.md +++ b/articles/api-management/policy-fragments.md @@ -82,7 +82,7 @@ For example, insert the policy fragment named *ForwardContext* in the inbound po ``` > [!TIP] -> To see the content of an included fragment displayed in the policy definition, select **Recalculate effective policy** in the policy editor. +> To see the content of an included fragment displayed in the policy definition, select **Calculate effective policy** in the policy editor. ## Manage policy fragments diff --git a/articles/api-management/private-endpoint.md b/articles/api-management/private-endpoint.md index 12ef721464b1..48d1b348a7a9 100644 --- a/articles/api-management/private-endpoint.md +++ b/articles/api-management/private-endpoint.md @@ -5,7 +5,7 @@ ms.service: api-management author: dlepow ms.author: danlep ms.topic: how-to -ms.date: 02/23/2022 +ms.date: 03/31/2022 --- @@ -19,6 +19,8 @@ You can configure a [private endpoint](../private-link/private-endpoint-overview * Configure custom DNS settings or an Azure DNS private zone to map the API Management hostname to the endpoint's private IP address. 
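For example, a sketch of the private DNS zone approach with the Azure CLI (the zone name, resource names, and the endpoint's private IP address are assumptions for illustration):

```azurecli
# Create a private DNS zone and link it to the virtual network
az network private-dns zone create --resource-group myResourceGroup --name privatelink.azure-api.net

az network private-dns link vnet create --resource-group myResourceGroup \
    --zone-name privatelink.azure-api.net --name myDNSLink \
    --virtual-network myVNet --registration-enabled false

# Map the API Management default hostname to the private endpoint's private IP address
az network private-dns record-set a add-record --resource-group myResourceGroup \
    --zone-name privatelink.azure-api.net --record-set-name myapim --ipv4-address 10.0.0.5
```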
+:::image type="content" source="media/private-endpoint/api-management-private-endpoint.png" alt-text="Diagram that shows a secure connection to API Management using private endpoint."::: + With a private endpoint and Private Link, you can: - Create multiple Private Link connections to an API Management instance. diff --git a/articles/api-management/virtual-network-concepts.md b/articles/api-management/virtual-network-concepts.md index d7303df5d9ab..f7be67f48b01 100644 --- a/articles/api-management/virtual-network-concepts.md +++ b/articles/api-management/virtual-network-concepts.md @@ -1,43 +1,54 @@ --- title: Azure API Management with an Azure virtual network -description: Learn about scenarios and requirements to connect your API Management instance to an Azure virtual network. +description: Learn about scenarios and requirements to secure your API Management instance using an Azure virtual network. author: dlepow ms.service: api-management ms.topic: conceptual -ms.date: 01/14/2022 +ms.date: 05/26/2022 ms.author: danlep ms.custom: --- # Use a virtual network with Azure API Management -With Azure virtual networks (VNets), you can place ("inject") your API Management instance in a non-internet-routable network to which you control access. In a virtual network, your API Management instance can securely access other networked Azure resources and also connect to on-premises networks using various VPN technologies. To learn more about Azure VNets, start with the information in the [Azure Virtual Network Overview](../virtual-network/virtual-networks-overview.md). +API Management provides several options to secure access to your API Management instance and APIs using an Azure virtual network. API Management supports the following options, which are mutually exclusive: + +* **Integration (injection)** of the API Management instance into the virtual network, enabling the gateway to access resources in the network. + + You can choose one of two integration modes: *external* or *internal*. They differ in whether inbound connectivity to the gateway and other API Management endpoints is allowed from the internet or only from within the virtual network. + +* **Enabling secure and private connectivity** to the API Management gateway using a *private endpoint* (preview). -> [!TIP] -> API Management also supports [private endpoints](../private-link/private-endpoint-overview.md). A private endpoint enables secure client connectivity to your API Management instance using a private IP address from your virtual network and Azure Private Link. [Learn more](private-endpoint.md) about using private endpoints with API Management. +The following table compares virtual networking options. For more information, see later sections of this article and links to detailed guidance. + +|Networking model |Supported tiers |Supported components |Supported traffic |Usage scenario | +|---------|---------|---------|---------|----| +|**[Virtual network - external](#virtual-network-integration)** | Developer, Premium | Azure portal, gateway, management plane, and Git repository | Inbound and outbound traffic can be allowed to internet, peered virtual networks, Express Route, and S2S VPN connections. | External access to private and on-premises backends +|**[Virtual network - internal](#virtual-network-integration)** | Developer, Premium | Developer portal, gateway, management plane, and Git repository. | Inbound and outbound traffic can be allowed to peered virtual networks, Express Route, and S2S VPN connections. 
| Internal access to private and on-premises backends +|**[Private endpoint (preview)](#private-endpoint)** | Developer, Basic, Standard, Premium | Gateway only (managed gateway supported, self-hosted gateway not supported). | Only inbound traffic can be allowed from internet, peered virtual networks, Express Route, and S2S VPN connections. | Secure client connection to API Management gateway | + +## Virtual network integration +With Azure virtual networks (VNets), you can place ("inject") your API Management instance in a non-internet-routable network to which you control access. In a virtual network, your API Management instance can securely access other networked Azure resources and also connect to on-premises networks using various VPN technologies. To learn more about Azure VNets, start with the information in the [Azure Virtual Network Overview](../virtual-network/virtual-networks-overview.md). -This article explains VNet connectivity options, requirements, and considerations for your API Management instance. You can use the Azure portal, Azure CLI, Azure Resource Manager templates, or other tools for the configuration. You control inbound and outbound traffic into the subnet in which API Management is deployed by using [network security groups][NetworkSecurityGroups]. + You can use the Azure portal, Azure CLI, Azure Resource Manager templates, or other tools for the configuration. You control inbound and outbound traffic into the subnet in which API Management is deployed by using [network security groups](../virtual-network/network-security-groups-overview.md). For detailed deployment steps and network configuration, see: * [Connect to an external virtual network using Azure API Management](./api-management-using-with-vnet.md). * [Connect to an internal virtual network using Azure API Management](./api-management-using-with-internal-vnet.md). -[!INCLUDE [premium-dev.md](../../includes/api-management-availability-premium-dev.md)] - -## Access options - -When created, an API Management instance must be accessible from the internet. Using a virtual network, you can configure the developer portal, API gateway, and other API Management endpoints to be accessible either from the internet (external mode) or only within the VNet (internal mode). +### Access options +Using a virtual network, you can configure the developer portal, API gateway, and other API Management endpoints to be accessible either from the internet (external mode) or only within the VNet (internal mode). * **External** - The API Management endpoints are accessible from the public internet via an external load balancer. The gateway can access resources within the VNet. - :::image type="content" source="media/virtual-network-concepts/api-management-vnet-external.png" alt-text="Connect to external VNet"::: + :::image type="content" source="media/virtual-network-concepts/api-management-vnet-external.png" alt-text="Diagram showing a connection to external VNet." lightbox="media/virtual-network-concepts/api-management-vnet-external.png"::: Use API Management in external mode to access backend services deployed in the virtual network. * **Internal** - The API Management endpoints are accessible only from within the VNet via an internal load balancer. The gateway can access resources within the VNet. 
- :::image type="content" source="media/virtual-network-concepts/api-management-vnet-internal.png" alt-text="Connect to internal VNet"::: + :::image type="content" source="media/virtual-network-concepts/api-management-vnet-internal.png" alt-text="Diagram showing a connection to internal VNet." lightbox="media/virtual-network-concepts/api-management-vnet-internal.png"::: Use API Management in internal mode to: @@ -46,11 +57,11 @@ When created, an API Management instance must be accessible from the internet. U * Manage your APIs hosted in multiple geographic locations, using a single gateway endpoint. -## Network resource requirements +### Network resource requirements The following are virtual network resource requirements for API Management. Some requirements differ depending on the version (`stv2` or `stv1`) of the [compute platform](compute-infrastructure.md) hosting your API Management instance. -### [stv2](#tab/stv2) +#### [stv2](#tab/stv2) * An Azure Resource Manager virtual network is required. * You must provide a Standard SKU [public IPv4 address](../virtual-network/ip-services/public-ip-addresses.md#sku) in addition to specifying a virtual network and subnet. @@ -59,16 +70,16 @@ The following are virtual network resource requirements for API Management. Some * The API Management service, virtual network and subnet, and public IP address resource must be in the same region and subscription. * For multi-region API Management deployments, configure virtual network resources separately for each location. -### [stv1](#tab/stv1) +#### [stv1](#tab/stv1) * An Azure Resource Manager virtual network is required. -* The subnet used to connect to the API Management instance must be dedicated to API Management. It cannot contain other Azure resource types. +* The subnet used to connect to the API Management instance must be dedicated to API Management. It can't contain other Azure resource types. * The API Management service, virtual network, and subnet resources must be in the same region and subscription. -* For multi-region API Management deployments, you configure virtual network resources separately for each location. +* For multi-region API Management deployments, configure virtual network resources separately for each location. --- -## Subnet size +### Subnet size The minimum size of the subnet in which API Management can be deployed is /29, which gives three usable IP addresses. Each extra scale [unit](api-management-capacity.md) of API Management requires two more IP addresses. The minimum size requirement is based on the following considerations: @@ -80,46 +91,78 @@ The minimum size of the subnet in which API Management can be deployed is /29, w * When deploying into an [internal VNet](./api-management-using-with-internal-vnet.md), the instance requires an extra IP address for the internal load balancer. -## Routing +### Routing See the Routing guidance when deploying your API Management instance into an [external VNet](./api-management-using-with-vnet.md#routing) or [internal VNet](./api-management-using-with-internal-vnet.md#routing). Learn more about the [IP addresses of API Management](api-management-howto-ip-addresses.md). -## DNS +### DNS -* In external mode, the VNet enables [Azure-provided name resolution](../virtual-network/virtual-networks-name-resolution-for-vms-and-role-instances.md#azure-provided-name-resolution) by default for your API Management endpoints and other Azure resources. It does not provide name resolution for on-premises resources. 
Optionally, configure your own DNS solution. +* In external mode, the VNet enables [Azure-provided name resolution](../virtual-network/virtual-networks-name-resolution-for-vms-and-role-instances.md#azure-provided-name-resolution) by default for your API Management endpoints and other Azure resources. It doesn't provide name resolution for on-premises resources. Optionally, configure your own DNS solution. * In internal mode, you must provide your own DNS solution to ensure name resolution for API Management endpoints and other required Azure resources. We recommend configuring an Azure [private DNS zone](../dns/private-dns-overview.md). For more information, see the DNS guidance when deploying your API Management instance into an [external VNet](./api-management-using-with-vnet.md#routing) or [internal VNet](./api-management-using-with-internal-vnet.md#routing). -For more information, see: +Related information: * [Name resolution for resources in Azure virtual networks](../virtual-network/virtual-networks-name-resolution-for-vms-and-role-instances.md#name-resolution-that-uses-your-own-dns-server). * [Create an Azure private DNS zone](../dns/private-dns-getstarted-portal.md) > [!IMPORTANT] > If you plan to use a custom DNS solution for the VNet, set it up **before** deploying an API Management service into it. Otherwise, you'll need to update the API Management service each time you change the DNS server(s) by running the [Apply Network Configuration Operation](/rest/api/apimanagement/current-ga/api-management-service/apply-network-configuration-updates), or by selecting **Apply network configuration** in the service instance's network configuration window in the Azure portal. -## Limitations +### Limitations -Some limitations differ depending on the version (`stv2` or `stv1`) of the [compute platform](compute-infrastructure.md) hosting your API Management instance. +Some virtual network limitations differ depending on the version (`stv2` or `stv1`) of the [compute platform](compute-infrastructure.md) hosting your API Management instance. -### [stv2](#tab/stv2) +#### [stv2](#tab/stv2) * A subnet containing API Management instances can't be moved across subscriptions. * For multi-region API Management deployments configured in internal VNet mode, users own the routing and are responsible for managing the load balancing across multiple regions. * To import an API to API Management from an [OpenAPI specification](import-and-publish.md), the specification URL must be hosted at a publicly accessible internet address. -### [stv1](#tab/stv1) +#### [stv1](#tab/stv1) -* A subnet containing API Management instances can't be movacross subscriptions. +* A subnet containing API Management instances can't be moved across subscriptions. * For multi-region API Management deployments configured in internal VNet mode, users own the routing and are responsible for managing the load balancing across multiple regions. * To import an API to API Management from an [OpenAPI specification](import-and-publish.md), the specification URL must be hosted at a publicly accessible internet address. -* Due to platform limitations, connectivity between a resource in a globally peered VNet in another region and an API Management service in internal mode will not work. For more information, see the [virtual network documentation](../virtual-network/virtual-network-manage-peering.md#requirements-and-constraints). 
+* Due to platform limitations, connectivity between a resource in a globally peered VNet in another region and an API Management service in internal mode won't work. For more information, see the [virtual network documentation](../virtual-network/virtual-network-manage-peering.md#requirements-and-constraints). --- +## Private endpoint + +API Management supports [private endpoints](../private-link/private-endpoint-overview.md). A private endpoint enables secure client connectivity to your API Management instance using a private IP address from your virtual network and Azure Private Link. + +:::image type="content" source="media/virtual-network-concepts/api-management-private-endpoint.png" alt-text="Diagram showing a secure connection to API Management using private endpoint." lightbox="media/virtual-network-concepts/api-management-private-endpoint.png"::: + +With a private endpoint and Private Link, you can: + +* Create multiple Private Link connections to an API Management instance. +* Use the private endpoint to send inbound traffic on a secure connection. +* Use policy to distinguish traffic that comes from the private endpoint. +* Limit incoming traffic only to private endpoints, preventing data exfiltration. + +> [!IMPORTANT] +> * API Management support for private endpoints is currently in preview. +> * During the preview period, a private endpoint connection supports only incoming traffic to the API Management managed gateway. + +For more information, see [Connect privately to API Management using a private endpoint](private-endpoint.md). + +## Advanced networking configurations + +### Secure API Management endpoints with a web application firewall + +You may have scenarios where you need both secure external and internal access to your API Management instance, and flexibility to reach private and on-premises backends. For these scenarios, you may choose to manage external access to the endpoints of an API Management instance with a web application firewall (WAF). + +One example is to deploy an API Management instance in an internal virtual network, and route public access to it using an internet-facing Azure Application Gateway: + +:::image type="content" source="media/virtual-network-concepts/api-management-application-gateway.png" alt-text="Diagram showing Application Gateway in front of API Management instance." lightbox="media/virtual-network-concepts/api-management-application-gateway.png"::: + +For more information, see [Integrate API Management in an internal virtual network with Application Gateway](api-management-howto-integrate-internal-vnet-appgateway.md). + + ## Next steps Learn more about: @@ -128,11 +171,13 @@ Learn more about: * [Connecting a virtual network from different deployment models](../vpn-gateway/vpn-gateway-connect-different-deployment-models-powershell.md) * [Virtual network frequently asked questions](../virtual-network/virtual-networks-faq.md) -Connect to a virtual network: +Virtual network configuration with API Management: * [Connect to an external virtual network using Azure API Management](./api-management-using-with-vnet.md). * [Connect to an internal virtual network using Azure API Management](./api-management-using-with-internal-vnet.md). 
+* [Connect privately to API Management using a private endpoint](private-endpoint.md) + -Review the following topics +Related articles: * [Connecting a Virtual Network to backend using Vpn Gateway](../vpn-gateway/design.md#s2smulti) * [Connecting a Virtual Network from different deployment models](../vpn-gateway/vpn-gateway-connect-different-deployment-models-powershell.md) @@ -140,17 +185,6 @@ Review the following topics * [Virtual Network Frequently asked Questions](../virtual-network/virtual-networks-faq.md) * [Service tags](../virtual-network/network-security-groups-overview.md#service-tags) -[api-management-using-vnet-menu]: ./media/api-management-using-with-vnet/api-management-menu-vnet.png -[api-management-setup-vpn-select]: ./media/api-management-using-with-vnet/api-management-using-vnet-select.png -[api-management-setup-vpn-add-api]: ./media/api-management-using-with-vnet/api-management-using-vnet-add-api.png -[api-management-vnet-private]: ./media/virtual-network-concepts/api-management-vnet-internal.png -[api-management-vnet-public]: ./media/virtual-network-concepts/api-management-vnet-external.png -[Enable VPN connections]: #enable-vpn -[Connect to a web service behind VPN]: #connect-vpn -[Related content]: #related-content -[UDRs]: ../virtual-network/virtual-networks-udr-overview.md -[NetworkSecurityGroups]: ../virtual-network/network-security-groups-overview.md -[ServiceEndpoints]: ../virtual-network/virtual-network-service-endpoints-overview.md -[ServiceTags]: ../virtual-network/network-security-groups-overview.md#service-tags + diff --git a/articles/app-service/configure-custom-container.md b/articles/app-service/configure-custom-container.md index 938069c61c35..7a377aa88cd7 100644 --- a/articles/app-service/configure-custom-container.md +++ b/articles/app-service/configure-custom-container.md @@ -206,7 +206,7 @@ The only exception is the `C:\home\LogFiles` directory, which is used to store t ::: zone pivot="container-linux" -You can use the */home* directory in your custom container file system to persist files across restarts and share them across instances. The `/home` directory is provided to enable your custom container to access persistent storage. Saving data within `/home` will contribute to the [storage space quota](https://docs.microsoft.com/azure/azure-resource-manager/management/azure-subscription-service-limits#app-service-limits) included with your App Service Plan. +You can use the */home* directory in your custom container file system to persist files across restarts and share them across instances. The `/home` directory is provided to enable your custom container to access persistent storage. Saving data within `/home` will contribute to the [storage space quota](../azure-resource-manager/management/azure-subscription-service-limits.md#app-service-limits) included with your App Service Plan. When persistent storage is disabled, then writes to the `/home` directory are not persisted across app restarts or across multiple instances. When persistent storage is enabled, all writes to the `/home` directory are persisted and can be accessed by all instances of a scaled-out app. Additionally, any contents inside the `/home` directory of the container are overwritten by any existing files already present on the persistent storage when the container starts. 
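Whether the `/home` mount described above is active is controlled by an app setting. As a hedged sketch (the app and resource group names below are placeholders, not values from this article), persistent storage can be toggled with the Azure CLI:

```azurecli
# Enable the shared, persistent /home storage for a custom-container app (placeholder names).
az webapp config appsettings set \
    --resource-group myResourceGroup \
    --name myContainerApp \
    --settings WEBSITES_ENABLE_APP_SERVICE_STORAGE=true

# Setting the value to false keeps the container file system ephemeral instead.
```

Changing an app setting restarts the app, so all instances pick up the new storage behavior.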
@@ -524,4 +524,4 @@ The following lists show supported and unsupported Docker Compose configuration Or, see additional resources: - [Environment variables and app settings reference](reference-app-settings.md) -- [Load certificate in Windows/Linux containers](configure-ssl-certificate-in-code.md#load-certificate-in-linuxwindows-containers) +- [Load certificate in Windows/Linux containers](configure-ssl-certificate-in-code.md#load-certificate-in-linuxwindows-containers) \ No newline at end of file diff --git a/articles/app-service/environment/migrate.md b/articles/app-service/environment/migrate.md index d7e4fe6497a3..8ca298d07268 100644 --- a/articles/app-service/environment/migrate.md +++ b/articles/app-service/environment/migrate.md @@ -88,7 +88,7 @@ If your App Service Environment doesn't pass the validation checks or you try to |Migrate is not available for this kind|App Service Environment v1 can't be migrated using the migration feature at this time. |Migrate using one of the [manual migration options](migration-alternatives.md) if you want to migrate immediately. Otherwise, wait for the migration feature to support this App Service Environment configuration. | |Full migration cannot be called before IP addresses are generated|You'll see this error if you attempt to migrate before finishing the pre-migration steps. |Ensure you've completed all pre-migration steps before you attempt to migrate. See the [step-by-step guide for migrating](how-to-migrate.md). | |Migration to ASEv3 is not allowed for this ASE|You won't be able to migrate using the migration feature. |Migrate using one of the [manual migration options](migration-alternatives.md). | -|Subscription has too many App Service Environments. Please remove some before trying to create more.|The App Service Environment [quota for your subscription](/azure/azure-resource-manager/management/azure-subscription-service-limits#app-service-limits) has been met. |Remove unneeded environments or contact support to review your options. | +|Subscription has too many App Service Environments. Please remove some before trying to create more.|The App Service Environment [quota for your subscription](../../azure-resource-manager/management/azure-subscription-service-limits.md#app-service-limits) has been met. |Remove unneeded environments or contact support to review your options. | |`` is not available in this location|You'll see this error if you're trying to migrate an App Service Environment in a region that doesn't support one of your requested features. |Migrate using one of the [manual migration options](migration-alternatives.md) if you want to migrate immediately. Otherwise, wait for the migration feature to support this App Service Environment configuration. | |Migrate cannot be called on this ASE until the active upgrade has finished. |App Service Environments can't be migrated during platform upgrades. You can set your [upgrade preference](using-an-ase.md#upgrade-preference) from the Azure portal. |Wait until the upgrade finishes and then migrate. | @@ -165,4 +165,4 @@ There's no cost to migrate your App Service Environment. 
You'll stop being charg > [App Service Environment v3 Networking](networking.md) > [!div class="nextstepaction"] -> [Using an App Service Environment v3](using.md) +> [Using an App Service Environment v3](using.md) \ No newline at end of file diff --git a/articles/app-service/quickstart-php.md b/articles/app-service/quickstart-php.md index 35a6a26a8cc4..7d27471704d2 100644 --- a/articles/app-service/quickstart-php.md +++ b/articles/app-service/quickstart-php.md @@ -59,15 +59,12 @@ Azure CLI has a command [`az webapp up`](/cli/azure/webapp#az_webapp_up) that wi In the terminal, deploy the code in your local folder using the [`az webapp up`](/cli/azure/webapp#az_webapp_up) command: ```azurecli -az webapp up \ - --sku F1 \ - --logs +az webapp up --runtime "php|8.0" --os-type=linux ``` - If the `az` command isn't recognized, be sure you have Azure CLI installed. - -- The `--sku F1` argument creates the web app on the Free pricing tier, which incurs a no cost. -- The `--logs` flag configures default logging required to enable viewing the log stream immediately after launching the webapp. +- The `--runtime "php|8.0"` argument creates the web app with PHP version 8.0. +- The `--os-type=linux` argument creates the web app on App Service on Linux. - You can optionally specify a name with the argument `--name <app-name>`. If you don't provide one, then a name will be automatically generated. - You can optionally include the argument `--location <location>` where `<location>` is an available Azure region. You can retrieve a list of allowable regions for your Azure account by running the [`az account list-locations`](/cli/azure/appservice#az_appservice_list_locations) command. - If you see the error, "Could not auto-detect the runtime stack of your app," make sure you're running the command in the code directory (See [Troubleshooting auto-detect issues with az webapp up](https://github.com/Azure/app-service-linux-docs/blob/master/AzWebAppUP/runtime_detection.md)). @@ -81,7 +78,7 @@ Resource group creation complete Creating AppServicePlan '<app-service-plan-name>' ... Creating webapp '<app-name>' ... Configuring default logging for the app, if not already enabled -Creating zip with contents of dir /home/cephas/myExpressApp ... +Creating zip with contents of dir /home/msangapu/myPhpApp ... Getting scm site credentials for zip deployment Starting zip deployment. This operation can take a while to complete ... Deployment endpoint responded with status code 202 @@ -116,10 +113,10 @@ Browse to the deployed application in your web browser at the URL `http:// [!div class="nextstepaction"] > [Configure PHP app](configure-language-php.md) -::: zone-end \ No newline at end of file +::: zone-end diff --git a/articles/app-service/tutorial-connect-msi-azure-database.md b/articles/app-service/tutorial-connect-msi-azure-database.md index 361aead40518..203d7238a58e 100644 --- a/articles/app-service/tutorial-connect-msi-azure-database.md +++ b/articles/app-service/tutorial-connect-msi-azure-database.md @@ -13,11 +13,11 @@ ms.custom: "mvc, devx-track-azurecli" [App Service](overview.md) provides a highly scalable, self-patching web hosting service in Azure. 
It also provides a [managed identity](overview-managed-identity.md) for your app, which is a turn-key solution for securing access to Azure databases, including: - [Azure SQL Database](/azure/azure-sql/database/) -- [Azure Database for MySQL](/azure/mysql/) -- [Azure Database for PostgreSQL](/azure/postgresql/) +- [Azure Database for MySQL](../mysql/index.yml) +- [Azure Database for PostgreSQL](../postgresql/index.yml) > [!NOTE] -> This tutorial doesn't include guidance for [Azure Cosmos DB](/azure/cosmos-db/), which supports Azure Active Directory authentication differently. For information, see Cosmos DB documentation. For example: [Use system-assigned managed identities to access Azure Cosmos DB data](../cosmos-db/managed-identity-based-authentication.md). +> This tutorial doesn't include guidance for [Azure Cosmos DB](../cosmos-db/index.yml), which supports Azure Active Directory authentication differently. For information, see Cosmos DB documentation. For example: [Use system-assigned managed identities to access Azure Cosmos DB data](../cosmos-db/managed-identity-based-authentication.md). Managed identities in App Service make your app more secure by eliminating secrets from your app, such as credentials in the connection strings. This tutorial shows you how to connect to the above-mentioned databases from App Service using managed identities. @@ -1098,4 +1098,4 @@ What you learned: > [Tutorial: Connect to Azure services that don't support managed identities (using Key Vault)](tutorial-connect-msi-key-vault.md) > [!div class="nextstepaction"] -> [Tutorial: Isolate back-end communication with Virtual Network integration](tutorial-networking-isolate-vnet.md) +> [Tutorial: Isolate back-end communication with Virtual Network integration](tutorial-networking-isolate-vnet.md) \ No newline at end of file diff --git a/articles/app-service/webjobs-dotnet-deploy-vs.md b/articles/app-service/webjobs-dotnet-deploy-vs.md index 42044702fe23..73ebdaacebf2 100644 --- a/articles/app-service/webjobs-dotnet-deploy-vs.md +++ b/articles/app-service/webjobs-dotnet-deploy-vs.md @@ -13,7 +13,7 @@ ms.reviewer: david.ebbo;suwatch;pbatum;naren.soni # Develop and deploy WebJobs using Visual Studio -This article explains how to use Visual Studio to deploy a console app project to a web app in [Azure App Service](overview.md) as an [Azure WebJob](/azure/app-service/webjobs-create). For information about how to deploy WebJobs by using the [Azure portal](https://portal.azure.com), see [Run background tasks with WebJobs in Azure App Service](webjobs-create.md). +This article explains how to use Visual Studio to deploy a console app project to a web app in [Azure App Service](overview.md) as an [Azure WebJob](./webjobs-create.md). For information about how to deploy WebJobs by using the [Azure portal](https://portal.azure.com), see [Run background tasks with WebJobs in Azure App Service](webjobs-create.md). You can choose to develop a WebJob that runs as either a [.NET Core app](#webjobs-as-net-core-console-apps) or a [.NET Framework app](#webjobs-as-net-framework-console-apps). Version 3.x of the [Azure WebJobs SDK](webjobs-sdk-how-to.md) lets you develop WebJobs that run as either .NET Core apps or .NET Framework apps, while version 2.x supports only the .NET Framework. The way that you deploy a WebJobs project is different for .NET Core projects than for .NET Framework projects. 
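Once a WebJobs project has been deployed alongside its web app, it can help to verify what actually landed. The following is a small sketch, not from the article, using the `az webapp webjob` command group (check the current CLI reference for exact options); the app, resource group, and WebJob names are placeholders:

```azurecli
# List continuous WebJobs deployed to the app (placeholder names).
az webapp webjob continuous list --name myWebApp --resource-group myResourceGroup

# List triggered (on-demand or scheduled) WebJobs, then run one by name.
az webapp webjob triggered list --name myWebApp --resource-group myResourceGroup
az webapp webjob triggered run --name myWebApp --resource-group myResourceGroup --webjob-name myWebJob
```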
@@ -98,7 +98,7 @@ To create a new WebJobs-enabled project, use the console app project template an Create a project that is configured to deploy automatically as a WebJob when you deploy a web project in the same solution. Use this option when you want to run your WebJob in the same web app in which you run the related web application. > [!NOTE] -> The WebJobs new-project template automatically installs NuGet packages and includes code in *Program.cs* for the [WebJobs SDK](/azure/app-service/webjobs-sdk-get-started). If you don't want to use the WebJobs SDK, remove or change the `host.RunAndBlock` statement in *Program.cs*. +> The WebJobs new-project template automatically installs NuGet packages and includes code in *Program.cs* for the [WebJobs SDK](./webjobs-sdk-get-started.md). If you don't want to use the WebJobs SDK, remove or change the `host.RunAndBlock` statement in *Program.cs*. > > @@ -239,4 +239,4 @@ If you enable **Always on** in Azure, you can use Visual Studio to change the We ## Next steps > [!div class="nextstepaction"] -> [Learn more about the WebJobs SDK](webjobs-sdk-how-to.md) +> [Learn more about the WebJobs SDK](webjobs-sdk-how-to.md) \ No newline at end of file diff --git a/articles/application-gateway/configuration-infrastructure.md b/articles/application-gateway/configuration-infrastructure.md index ac73f013be49..8a8bd2f7e383 100644 --- a/articles/application-gateway/configuration-infrastructure.md +++ b/articles/application-gateway/configuration-infrastructure.md @@ -15,7 +15,7 @@ The application gateway infrastructure includes the virtual network, subnets, ne ## Virtual network and dedicated subnet -An application gateway is a dedicated deployment in your virtual network. Within your virtual network, a dedicated subnet is required for the application gateway. You can have multiple instances of a given application gateway deployment in a subnet. You can also deploy other application gateways in the subnet. But you can't deploy any other resource in the application gateway subnet. You can't mix Standard_v2 and Standard Azure Application Gateway on the same subnet. +An application gateway is a dedicated deployment in your virtual network. Within your virtual network, a dedicated subnet is required for the application gateway. You can have multiple instances of a given application gateway deployment in a subnet. You can also deploy other application gateways in the subnet. But you can't deploy any other resource in the application gateway subnet. You can't mix v1 and v2 Azure Application Gateway SKUs on the same subnet. > [!NOTE] > [Virtual network service endpoint policies](../virtual-network/virtual-network-service-endpoint-policies-overview.md) are currently not supported in an Application Gateway subnet. diff --git a/articles/application-gateway/http-response-codes.md b/articles/application-gateway/http-response-codes.md index 9e4613fa0cf9..425cb3e069ba 100644 --- a/articles/application-gateway/http-response-codes.md +++ b/articles/application-gateway/http-response-codes.md @@ -83,13 +83,13 @@ An HTTP 499 response is presented if a client request that is sent to applicatio #### 500 – Internal Server Error -Azure Application Gateway shouldn't exhibit 500 response codes. Please open a support request if you see this code, because this issue is an internal error to the service. For information on how to open a support case, see [Create an Azure support request](/azure/azure-portal/supportability/how-to-create-azure-support-request). 
+Azure Application Gateway shouldn't exhibit 500 response codes. Please open a support request if you see this code, because this issue is an internal error to the service. For information on how to open a support case, see [Create an Azure support request](../azure-portal/supportability/how-to-create-azure-support-request.md). #### 502 – Bad Gateway HTTP 502 errors can have several root causes, for example: - NSG, UDR, or custom DNS is blocking access to backend pool members. -- Back-end VMs or instances of [virtual machine scale sets](/azure/virtual-machine-scale-sets/overview) aren't responding to the default health probe. +- Back-end VMs or instances of [virtual machine scale sets](../virtual-machine-scale-sets/overview.md) aren't responding to the default health probe. - Invalid or improper configuration of custom health probes. - Azure Application Gateway's [back-end pool isn't configured or empty](application-gateway-troubleshooting-502.md#empty-backendaddresspool). - None of the VMs or instances in [virtual machine scale set are healthy](application-gateway-troubleshooting-502.md#unhealthy-instances-in-backendaddresspool). @@ -103,4 +103,4 @@ HTTP 504 errors are presented if a request is sent to application gateways using ## Next steps -If the information in this article doesn't help to resolve the issue, [submit a support ticket](https://azure.microsoft.com/support/options/). +If the information in this article doesn't help to resolve the issue, [submit a support ticket](https://azure.microsoft.com/support/options/). \ No newline at end of file diff --git a/articles/applied-ai-services/form-recognizer/concept-read.md b/articles/applied-ai-services/form-recognizer/concept-read.md index 80e1f756e19a..69bd79ed424c 100644 --- a/articles/applied-ai-services/form-recognizer/concept-read.md +++ b/articles/applied-ai-services/form-recognizer/concept-read.md @@ -15,7 +15,7 @@ ms.custom: ignite-fall-2021 # Form Recognizer read model -The Form Recognizer v3.0 preview includes the new Read OCR model. Form Recognizer Read builds on the success of COmputer Vision Read and optimizes even more for analyzing documents, including new document formats in the future. It extracts printed and handwritten text from documents and images and can handle mixed languages in the documents and text line. The read model can detect lines, words, locations, and additionally detect languages. It is the foundational technology powering the text extraction in Form Recognizer Layout, prebuilt, general document, and custom models. +The Form Recognizer v3.0 preview includes the new Read OCR model. Form Recognizer Read builds on the success of Computer Vision Read and optimizes even more for analyzing documents, including new document formats in the future. It extracts printed and handwritten text from documents and images and can handle mixed languages in the documents and text line. The read model can detect lines, words, locations, and additionally detect languages. It is the foundational technology powering the text extraction in Form Recognizer Layout, prebuilt, general document, and custom models. ## Development options @@ -77,7 +77,7 @@ Form Recognizer preview version supports several languages for the read model. * ### Text lines and words -Read API extracts text from documents and images. It accepts PDFs and images of documents and handles printed and/or handwritten text, and supports mixed languages. 
Text is extracted as text lnes, words, bounding boxes, confidence scores, and style, whether handwritten or not, supported for Latin languages only. +Read API extracts text from documents and images. It accepts PDFs and images of documents and handles printed and/or handwritten text, and supports mixed languages. Text is extracted as text lines, words, bounding boxes, confidence scores, and style, whether handwritten or not, supported for Latin languages only. ### Language detection diff --git a/articles/applied-ai-services/form-recognizer/resource-customer-stories.md b/articles/applied-ai-services/form-recognizer/resource-customer-stories.md index ef9536535735..64a6a29f840b 100644 --- a/articles/applied-ai-services/form-recognizer/resource-customer-stories.md +++ b/articles/applied-ai-services/form-recognizer/resource-customer-stories.md @@ -7,7 +7,7 @@ manager: nitinme ms.service: applied-ai-services ms.subservice: forms-recognizer ms.topic: conceptual -ms.date: 02/04/2022 +ms.date: 05/25/2022 ms.author: lajanuar --- @@ -17,23 +17,25 @@ The following customers and partners have adopted Form Recognizer across a wide | Customer/Partner | Description | Link | |---------|-------------|----------------------| -| **Acumatica** | [**Acumatica**](https://www.acumatica.com/) is a technology provider that develops cloud- and browser-based enterprise resource planning (ERP) software for small and medium-sized businesses (SMBs). To bring expense claims into the modern age, Acumatica incorporated Form Recognizer into its native application. The Form Recognizer's prebuilt-receipt API and machine learning capabilities are used to automatically extract data from receipts. Acumatica's customers can file multiple, error-free claims in a matter of seconds, freeing up more time to focus on other important tasks. | [Customer story](https://customers.microsoft.com/story/762684-acumatica-partner-professional-services-azure) | +| **Acumatica** | [**Acumatica**](https://www.acumatica.com/) is a technology provider that develops cloud and browser-based enterprise resource planning (ERP) software for small and medium-sized businesses (SMBs). To bring expense claims into the modern age, Acumatica incorporated Form Recognizer into its native application. The Form Recognizer's prebuilt-receipt API and machine learning capabilities are used to automatically extract data from receipts. Acumatica's customers can file multiple, error-free claims in a matter of seconds, freeing up more time to focus on other important tasks. | [Customer story](https://customers.microsoft.com/story/762684-acumatica-partner-professional-services-azure) | + | **Air Canada** | In September 2021, [**Air Canada**](https://www.aircanada.com/) was tasked with verifying the COVID-19 vaccination status of thousands of worldwide employees in only two months. After realizing manual verification would be too costly and complex within the time constraint, Air Canada turned to its internal AI team for an automated solution. The AI team partnered with Microsoft and used Form Recognizer to roll out a fully functional, accurate solution within weeks. This partnership met the government mandate on time and saved thousands of hours of manual work. 
| [Customer story](https://customers.microsoft.com/story/1505667713938806113-air-canada-travel-transportation-azure-form-recognizer)| |**Arkas Logistics** | [**Arkas Logistics**](http://www.arkaslojistik.com.tr/) is operates under the umbrella of Arkas Holding, Turkey's leading holding institution and operating in 23 countries. During the COVID-19 crisis, Arkas Logistics has been able to provide outstanding, complete logistical services thanks to its focus on contactless operation and digitalization steps. Form Recognizer powers a solution that maintains the continuity of the supply chain and allows for uninterrupted service. | [Customer story](https://customers.microsoft.com/story/842149-arkas-logistics-transportation-azure-en-turkey ) | |**Automation Anywhere**| [**Automation Anywhere**](https://www.automationanywhere.com/) is on a singular and unwavering mission to democratize automation by liberating teams from mundane, repetitive tasks, and allowing more time for innovation and creativity with cloud-native robotic process automation (RPA)software. To protect the citizens of the United Kingdom, healthcare providers must process tens of thousands of COVID-19 tests daily, each one accompanied by a form for the World Health Organization (WHO). Manually completing and processing these forms would potentially slow testing and divert resources away from patient care. In response, Automation Anywhere built an AI-powered bot to help a healthcare provider automatically process and submit the COVID-19 test forms at scale. | [Customer story](https://customers.microsoft.com/story/811346-automation-anywhere-partner-professional-services-azure-cognitive-services) | |**AvidXchange**| [**AvidXchange**](https://www.avidxchange.com/) has developed an accounts payable automation solution applying Form Recognizer. AvidXchange partners with Azure Cognitive Services to deliver an accounts payable automation solution for the middle market. Customers benefit from faster invoice processing times and increased accuracy to ensure their suppliers are paid the right amount, at the right time. | [Blog](https://techcommunity.microsoft.com/t5/azure-ai/form-recognizer-now-reads-more-languages-processes-ids-and/ba-p/2179428)| |**Blue Prism**| [**Blue Prism**](https://www.blueprism.com/) Decipher is an AI-powered document processing capability that's directly embedded into the company's connected-RPA platform. Decipher works with Form Recognizer to help organizations process forms faster and with less human effort. One of Blue Prism's customers has been testing the solution to automate invoice handling as part of its procurement process. | [Customer story](https://customers.microsoft.com/story/737482-blue-prism-partner-professional-services-azure) | |**Chevron**| [**Chevron**](https://www.chevron.com//) Canada Business Unit is now using Form Recognizer with UiPath's robotic process automation platform to automate the extraction of data and move it into back-end systems for analysis. Subject matter experts have more time to focus on higher-value activities and information flows more rapidly. Accelerated operational control enables the company to analyze its business with greater speed, accuracy, and depth. | [Customer story](https://customers.microsoft.com/story/chevron-mining-oil-gas-azure-cognitive-services)| |**Cross Masters**|[**Cross Masters**](https://crossmasters.com/), uses cutting-edge AI technologies not only as a passion, but as an essential part of a work culture requiring continuous innovation. 
One of the latest success stories is automation of manual paperwork required to process thousands of invoices. Cross Masters used Form Recognizer to develop a unique, customized solution, to provide clients with market insights from a large set of collected invoices. Most impressive is the extraction quality and continuous introduction of new features, such as model composing and table labeling. | [Blog](https://techcommunity.microsoft.com/t5/azure-ai/form-recognizer-now-reads-more-languages-processes-ids-and/ba-p/2179428)| -|**Element**| [**Element**](https://www.element.com/) is a global business that provides specialist testing, inspection, and certification services to a diverse range of businesses. Element is one of the fastest growing companies in the global testing, inspection and certification sector having over 6,500 engaged experts working in more than 200 facilities across the globe. When the finance team for the Americas was forced to work from home during the COVID-19 pandemic, it needed to digitalize its paper processes fast. The creativity of the team and its use of Azure Form Recognizer delivered more than business as usual—it delivered significant efficiencies. The Element team used the tools in Microsoft Azure so the next phase could be expedited. Rather than coding from scratch, they saw the opportunity to use the Azure Form Recognizer. This integration quickly gave them the functionality they needed, together with the agility and security of Microsoft Azure. Microsoft Azure Logic Apps is used to automate the process of extracting the documents from email, storing them, and updating the system with the extracted data. Computer Vision, part of Azure Cognitive Services, partners with Azure Form Recognizer to extract the right data points from the invoice documents—whether they're a pdf or scanned images. | [Customer story](https://customers.microsoft.com/story/1414941527887021413-element)| -|**Emaar Properties**| [**Emaar Properties**](https://www.emaar.com/en/), operates Dubai Mall, the world's most-visited retail and entertainment destination. Each year, the Dubai Mall draws more than 80 million visitors. To enrich the shopping experience, Emaar Properties offers a unique rewards program through a dedicated mobile app. Loyalty program points are earned via submitted receipts. Emaar Properties uses Microsoft Azure Form Recognizer to process submitted receipts and has achieved 92 percent reading accuracy.| [Customer story](https://customers.microsoft.com/story/1459754150957690925-emaar-retailers-azure-en-united-arab-emirates)| -|**EY**| [**EY**](https://ey.com/) (Ernst & Young Global Limited) is a multinational professional services network that helps to create long-term value for clients and build trust in the capital markets. Enabled by data and technology, diverse EY teams in over 150 countries to help clients grow, transform, and operate. EY teams work across assurance, consulting, law, strategy, tax, and transactions to find solutions for complex issues facing our world today. The EY Technology team collaborated with Microsoft to build a platform that hastens invoice extraction and contract comparison processes. Azure Form Recognizer and Custom Vision partnered to enable EY teams to automate and improve the OCR and document handling processes for its consulting, tax, audit, and transactions services clients. 
| [Customer story](https://customers.microsoft.com/story/1404985164224935715-ey-professional-services-azure-form-recognizer)| -|**Financial Fabric**| [**Financial Fabric**](https://www.financialfabric.com//), a Microsoft Cloud Solution Provider, delivers data architecture, science, and analytics services to investment managers at hedge funds, family offices, and corporate treasuries. Its daily processes involve extracting and normalizing data from thousands of complex financial documents, such as bank statements and legal agreements. The company then provides custom analytics to help its clients make better investment decisions. Extracting this data previously took days or weeks. By using Form Recognizer, Financial Fabric has reduced the time it takes to go from extraction to analysis to just minutes. | [Customer story](https://customers.microsoft.com/story/financial-fabric-banking-capital-markets-azure)| -|**GEP**| [**GEP**](https://www.gep.com/) has developed an invoice processing solution for a client using Form Recognizer. "At GEP, we're seeing AI and automation make a profound impact on procurement and the supply chain. By combining our AI solution with Microsoft Form Recognizer, we automated the processing of 4,000 invoices a day for a client... It saved them tens of thousands of hours of manual effort, while improving accuracy, controls and compliance on a global scale." Sarateudu Sethi, GEP's Vice President of Artificial Intelligence. | [Blog](https://techcommunity.microsoft.com/t5/azure-ai/form-recognizer-now-reads-more-languages-processes-ids-and/ba-p/2179428)| +|**Element**| [**Element**](https://www.element.com/) is a global business that provides specialist testing, inspection, and certification services to a diverse range of businesses. Element is one of the fastest growing companies in the global testing, inspection and certification sector having over 6,500 engaged experts working in more than 200 facilities across the globe. When the finance team for the Americas was forced to work from home during the COVID-19 pandemic, it needed to digitalize its paper processes fast. The creativity of the team and its use of Azure Form Recognizer delivered more than business as usual—it delivered significant efficiencies. The Element team used the tools in Azure so the next phase could be expedited. Rather than coding from scratch, they saw the opportunity to use the Azure Form Recognizer. This integration quickly gave them the functionality they needed, together with the agility and security of Azure. Azure Logic Apps is used to automate the process of extracting the documents from email, storing them, and updating the system with the extracted data. Computer Vision, part of Azure Cognitive Services, partners with Azure Form Recognizer to extract the right data points from the invoice documents—whether they're a pdf or scanned images. | [Customer story](https://customers.microsoft.com/story/1414941527887021413-element)| +|**Emaar Properties**| [**Emaar Properties**](https://www.emaar.com/en/), operates Dubai Mall, the world's most-visited retail and entertainment destination. Each year, the Dubai Mall draws more than 80 million visitors. To enrich the shopping experience, Emaar Properties offers a unique rewards program through a dedicated mobile app. Loyalty program points are earned via submitted receipts. 
Emaar Properties uses Azure Form Recognizer to process submitted receipts and has achieved 92 percent reading accuracy.| [Customer story](https://customers.microsoft.com/story/1459754150957690925-emaar-retailers-azure-en-united-arab-emirates)| +|**EY**| [**EY**](https://ey.com/) (Ernst & Young Global Limited) is a multinational professional services network that helps to create long-term value for clients and build trust in the capital markets. Enabled by data and technology, diverse EY teams in over 150 countries help clients grow, transform, and operate. EY teams work across assurance, consulting, law, strategy, tax, and transactions to find solutions for complex issues facing our world today. The EY Technology team collaborated with Microsoft to build a platform that hastens invoice extraction and contract comparison processes. Azure Form Recognizer and Custom Vision partnered to enable EY teams to automate and improve the OCR and document handling processes for its transactions services clients. | [Customer story](https://customers.microsoft.com/story/1404985164224935715-ey-professional-services-azure-form-recognizer)| +|**Financial Fabric**| [**Financial Fabric**](https://www.financialfabric.com/), a Microsoft Cloud Solution Provider, delivers data architecture, science, and analytics services to investment managers at hedge funds, family offices, and corporate treasuries. Its daily processes involve extracting and normalizing data from thousands of complex financial documents, such as bank statements and legal agreements. The company then provides custom analytics to help its clients make better investment decisions. Extracting this data previously took days or weeks. By using Form Recognizer, Financial Fabric has reduced the time it takes to go from extraction to analysis to just minutes. | [Customer story](https://customers.microsoft.com/story/financial-fabric-banking-capital-markets-azure)| +|**Fujitsu**| [**Fujitsu**](https://scanners.us.fujitsu.com/about-us) is the world leader in document scanning technology, with more than 50 percent of global market share, but that doesn't stop the company from constantly innovating. To improve the performance and accuracy of its cloud scanning solution, Fujitsu incorporated Azure Form Recognizer. It took only a few months to deploy the new technologies, and they have boosted character recognition rates as high as 99.9 percent. This collaboration helps Fujitsu deliver market-leading innovation and give its customers powerful and flexible tools for end-to-end document management. | [Customer story](https://customers.microsoft.com/en-us/story/1504311236437869486-fujitsu-document-scanning-azure-form-recognizer)| +|**GEP**| [**GEP**](https://www.gep.com/) has developed an invoice processing solution for a client using Form Recognizer. GEP combined their AI solution with Azure Form Recognizer to automate the processing of 4,000 invoices a day for a client, saving them tens of thousands of hours of manual effort. This collaborative effort improved accuracy, controls, and compliance on a global scale, according to Sarateudu Sethi, GEP's Vice President of Artificial Intelligence. 
| [Blog](https://techcommunity.microsoft.com/t5/azure-ai/form-recognizer-now-reads-more-languages-processes-ids-and/ba-p/2179428)| |**HCA Healthcare**| [**HCA Healthcare**](https://hcahealthcare.com/) is one of the nation's leading providers of healthcare with over 180 hospitals and 2,000 sites-of-care located throughout the United States and serving approximately 35 million patients each year. Currently, they're using Azure Form Recognizer to simplify and improve the patient onboarding experience and reducing administrative time spent entering repetitive data into the care center's system. | [Customer story](https://customers.microsoft.com/story/1404891793134114534-hca-healthcare-healthcare-provider-azure)| |**Icertis**| [**Icertis**](https://www.icertis.com/), is a Software as a Service (SaaS) provider headquartered in Bellevue, Washington. Icertis digitally transforms the contract management process with a cloud-based, AI-powered, contract lifecycle management solution. Azure Form Recognizer enables Icertis Contract Intelligence to take key-value pairs embedded in contracts and create structured data understood and operated upon by machine algorithms. Through these and other powerful Azure Cognitive and AI services, Icertis empowers customers in every industry to improve business in multiple ways: optimized manufacturing operations, added agility to retail strategies, reduced risk in IT services, and faster delivery of life-saving pharmaceutical products. | [Blog](https://cloudblogs.microsoft.com/industry-blog/en-in/unicorn/2022/01/12/how-icertis-built-a-contract-management-solution-using-azure-form-recognizer/)| |**Instabase**| [**Instabase**](https://instabase.com/) is a horizontal application platform that provides best-in-class machine learning processes to help retrieve, organize, identify, and understand complex masses of unorganized data. Instabase then brings this data into business workflows as organized information. The platform provides a repository of integrative applications to orchestrate and harness that information with the means to rapidly extend and enhance them as required. Instabase applications are fully containerized for widespread, infrastructure-agnostic deployment. | [Customer story](https://customers.microsoft.com/en-gb/story/1376278902865681018-instabase-partner-professional-services-azure)| -|**Northern Trust**| [**Northern Trust**](https://www.northerntrust.com/) is a leading provider of wealth management, asset servicing, asset management, and banking to corporations, institutions, families, and individuals. As part of its initiative to digitize alternative asset servicing, Northern Trust has launched an AI-powered solution to extract unstructured investment data from alternative asset documents and making it accessible and actionable for asset-owner clients. Microsoft Azure Applied AI services accelerate time-to-value for enterprises building AI solutions. This proprietary solution transforms crucial information such as capital call notices, cash and stock distribution notices, and capital account statements from various unstructured formats into digital, actionable insights for investment teams. 
| [Customer story](https://www.businesswire.com/news/home/20210914005449/en/Northern-Trust-Automates-Data-Extraction-from-Alternative-Asset-Documentation)| +|**Northern Trust**| [**Northern Trust**](https://www.northerntrust.com/) is a leading provider of wealth management, asset servicing, asset management, and banking to corporations, institutions, families, and individuals. As part of its initiative to digitize alternative asset servicing, Northern Trust has launched an AI-powered solution to extract unstructured investment data from alternative asset documents and making it accessible and actionable for asset-owner clients. Azure Applied AI services accelerate time-to-value for enterprises building AI solutions. This proprietary solution transforms crucial information from various unstructured formats into digital, actionable insights for investment teams. | [Customer story](https://www.businesswire.com/news/home/20210914005449/en/Northern-Trust-Automates-Data-Extraction-from-Alternative-Asset-Documentation)| |**Standard Bank**| [**Standard Bank of South Africa**](https://www.standardbank.co.za/southafrica/personal/home) is Africa's largest bank by assets. Standard Bank is headquartered in Johannesburg, South Africa, and has more than 150 years of trade experience in Africa and beyond. When manual due diligence in cross-border transactions began absorbing too much staff time, the bank decided it needed a new way forward. Standard Bank uses Form Recognizer to significantly reduce its cross-border payments registration and processing time. | [Customer story](https://customers.microsoft.com/en-hk/story/1395059149522299983-standard-bank-of-south-africa-banking-capital-markets-azure-en-south-africa)| | **WEX**| [**WEX**](https://www.wexinc.com/) has developed a tool to process Explanation of Benefits documents using Form Recognizer. "The technology is truly amazing. I was initially worried that this type of solution wouldn't be feasible, but I soon realized that Form Recognizer can read virtually any document with accuracy." Matt Dallahan, Senior Vice President of Product Management and Strategy | [Blog](https://techcommunity.microsoft.com/t5/azure-ai/form-recognizer-now-reads-more-languages-processes-ids-and/ba-p/2179428)| -|**Wilson Allen** | [**Wilson Allen**](https://wilsonallen.com/) took advantage of AI container support for Microsoft Azure Cognitive Services and created a powerful AI solution that help firms around the world find unprecedented levels of insight in previously siloed and unstructured data. Its clients can use this data to support business development and foster client relationships. | [Customer story](https://customers.microsoft.com/story/814361-wilson-allen-partner-professional-services-azure)| +|**Wilson Allen** | [**Wilson Allen**](https://wilsonallen.com/) took advantage of AI container support for Azure Cognitive Services and created a powerful AI solution that help firms around the world find unprecedented levels of insight in previously siloed and unstructured data. Its clients can use this data to support business development and foster client relationships. | [Customer story](https://customers.microsoft.com/story/814361-wilson-allen-partner-professional-services-azure)| |**Zelros**| [**Zelros**](http://www.zelros.com/) offers AI-powered software for the insurance industry. Insurers use the Zelros platform to take in forms and seamlessly manage customer enrollment and claims filing. 
The company combined its technology with Form Recognizer to automatically pull key-value pairs and text out of documents. When insurers use the Zelros platform, they can quickly process paperwork, ensure high accuracy, and redirect thousands of hours previously spent on manual data extraction toward better service. | [Customer story](https://customers.microsoft.com/story/816397-zelros-insurance-azure)| diff --git a/articles/applied-ai-services/metrics-advisor/whats-new.md b/articles/applied-ai-services/metrics-advisor/whats-new.md index d5a67006d1e9..1670177f1a30 100644 --- a/articles/applied-ai-services/metrics-advisor/whats-new.md +++ b/articles/applied-ai-services/metrics-advisor/whats-new.md @@ -8,7 +8,7 @@ manager: nitinme ms.service: applied-ai-services ms.subservice: metrics-advisor ms.topic: overview -ms.date: 10/14/2020 +ms.date: 05/25/2022 ms.author: mbullwin --- @@ -16,6 +16,12 @@ ms.author: mbullwin Welcome! This page covers what's new in the Metrics Advisor docs. Check back every month for information on service changes, doc additions and updates this month. +## May 2022 + + **Detection configuration auto-tuning** has been released. This feature enables you to customize the service to better surface and personalize anomalies. Instead of the traditional way of setting configurations for each time series or a group of time series. A guided experience is provided to capture your detection preferences, such as the level of sensitivity, and the types of anomaly patterns, which allows you to tailor the model to your own needs on the back end. Those preferences can then be applied to all the time series you're monitoring. This allows you to reduce configuration costs while achieving better detection results. + +Check out [this article](how-tos/configure-metrics.md#tune-the-detection-configuration) to learn how to take advantage of the new feature. + ## SDK updates If you want to learn about the latest updates to Metrics Advisor client SDKs see: diff --git a/articles/attestation/audit-logs.md b/articles/attestation/audit-logs.md index 8ef8823540a4..8da2a9ece7b5 100644 --- a/articles/attestation/audit-logs.md +++ b/articles/attestation/audit-logs.md @@ -65,7 +65,7 @@ Individual blobs are stored as text, formatted as a JSON blob. Let’s look at a } ``` -Most of these fields are documented in the [Top-level common schema](/azure/azure-monitor/essentials/resource-logs-schema#top-level-common-schema). The following table lists the field names and descriptions for the entries not included in the top-level common schema: +Most of these fields are documented in the [Top-level common schema](../azure-monitor/essentials/resource-logs-schema.md#top-level-common-schema). The following table lists the field names and descriptions for the entries not included in the top-level common schema: | Field Name | Description | |------------------------------------------|-----------------------------------------------------------------------------------------------| @@ -82,4 +82,4 @@ The properties contain additional Azure attestation specific context: | infoDataReceived | Information about the request received from the client. 
Includes some HTTP headers, the number of headers received, the content type and content length | ## Next steps -- [How to enable Microsoft Azure Attestation logging ](azure-diagnostic-monitoring.md) +- [How to enable Microsoft Azure Attestation logging ](azure-diagnostic-monitoring.md) \ No newline at end of file diff --git a/articles/automanage/automanage-linux.md b/articles/automanage/automanage-linux.md index e833b45790e9..09bf3b9e8a6e 100644 --- a/articles/automanage/automanage-linux.md +++ b/articles/automanage/automanage-linux.md @@ -40,7 +40,7 @@ Automanage supports the following Linux distributions and versions: |[Guest configuration](../governance/policy/concepts/guest-configuration.md) | Guest configuration is used to monitor the configuration and report on the compliance of the machine. The Automanage service will install the Azure Linux baseline using the guest configuration extension. For Linux machines, the guest configuration service will install the baseline in audit-only mode. You will be able to see where your VM is out of compliance with the baseline, but noncompliance won't be automatically remediated. Learn [more](../governance/policy/concepts/guest-configuration.md). |Production, Dev/Test | |[Boot Diagnostics](../virtual-machines/boot-diagnostics.md) | Boot diagnostics is a debugging feature for Azure virtual machines (VM) that allows diagnosis of VM boot failures. Boot diagnostics enables a user to observe the state of their VM as it is booting up by collecting serial log information and screenshots. This will only be enabled for machines that are using managed disks. |Production, Dev/Test | |[Azure Automation Account](../automation/automation-create-standalone-account.md) |Azure Automation supports management throughout the lifecycle of your infrastructure and applications. Learn [more](../automation/automation-intro.md). |Production, Dev/Test | -|[Log Analytics Workspace](../azure-monitor/logs/log-analytics-overview.md) |Azure Monitor stores log data in a Log Analytics workspace, which is an Azure resource and a container where data is collected, aggregated, and serves as an administrative boundary. Learn [more](../azure-monitor/logs/design-logs-deployment.md). |Production, Dev/Test | +|[Log Analytics Workspace](../azure-monitor/logs/log-analytics-workspace-overview.md) |Azure Monitor stores log data in a Log Analytics workspace, which is an Azure resource and a container where data is collected, aggregated, and serves as an administrative boundary. Learn [more](../azure-monitor/logs/workspace-design.md). |Production, Dev/Test | 1 The configuration profile selection is available when you are enabling Automanage. Learn [more](automanage-virtual-machines.md#configuration-profile). You can also create your own custom profile with the set of Azure services and settings that you need. diff --git a/articles/automanage/virtual-machines-best-practices.md b/articles/automanage/virtual-machines-best-practices.md index b4399d38046e..6ad448a33654 100644 --- a/articles/automanage/virtual-machines-best-practices.md +++ b/articles/automanage/virtual-machines-best-practices.md @@ -28,7 +28,7 @@ For all of these services, we will auto-onboard, auto-configure, monitor for dri |Change Tracking & Inventory |Change Tracking and Inventory combines change tracking and inventory functions to allow you to track virtual machine and server infrastructure changes. 
The service supports change tracking across services, daemons software, registry, and files in your environment to help you diagnose unwanted changes and raise alerts. Inventory support allows you to query in-guest resources for visibility into installed applications and other configuration items. Learn [more](../automation/change-tracking/overview.md). |Azure VM Best Practices – Production, Azure VM Best Practices – Dev/Test |No | |Guest configuration | Guest configuration is used to monitor the configuration and report on the compliance of the machine. The Automanage service will install the [Windows security baselines](/windows/security/threat-protection/windows-security-baselines) using the guest configuration extension. Learn [more](../governance/policy/concepts/guest-configuration.md). |Azure VM Best Practices – Production, Azure VM Best Practices – Dev/Test |No | |Azure Automation Account |Azure Automation supports management throughout the lifecycle of your infrastructure and applications. Learn [more](../automation/automation-intro.md). |Azure VM Best Practices – Production, Azure VM Best Practices – Dev/Test |No | -|Log Analytics Workspace |Azure Monitor stores log data in a Log Analytics workspace, which is an Azure resource and a container where data is collected, aggregated, and serves as an administrative boundary. Learn [more](../azure-monitor/logs/design-logs-deployment.md). |Azure VM Best Practices – Production, Azure VM Best Practices – Dev/Test |No | +|Log Analytics Workspace |Azure Monitor stores log data in a Log Analytics workspace, which is an Azure resource and a container where data is collected, aggregated, and serves as an administrative boundary. Learn [more](../azure-monitor/logs/log-analytics-workspace-overview.md). |Azure VM Best Practices – Production, Azure VM Best Practices – Dev/Test |No | 1 Configuration profiles are available when you are enabling Automanage. Learn [more](automanage-virtual-machines.md). You can also adjust the default settings of the configuration profile and set your own preferences within the best practices constraints. diff --git a/articles/automation/automation-hybrid-runbook-worker.md b/articles/automation/automation-hybrid-runbook-worker.md index 2ee01f537abc..ed9bdbfda406 100644 --- a/articles/automation/automation-hybrid-runbook-worker.md +++ b/articles/automation/automation-hybrid-runbook-worker.md @@ -17,7 +17,7 @@ Azure Automation provides native integration of the Hybrid Runbook Worker role t | Platform | Description | |---|---| |**Extension-based (V2)** |Installed using the [Hybrid Runbook Worker VM extension](./extension-based-hybrid-runbook-worker-install.md), without any dependency on the Log Analytics agent reporting to an Azure Monitor Log Analytics workspace. **This is the recommended platform**.| -|**Agent-based (V1)** |Installed after the [Log Analytics agent](../azure-monitor/agents/log-analytics-agent.md) reporting to an Azure Monitor [Log Analytics workspace](../azure-monitor/logs/design-logs-deployment.md) is completed.| +|**Agent-based (V1)** |Installed after the [Log Analytics agent](../azure-monitor/agents/log-analytics-agent.md) reporting to an Azure Monitor [Log Analytics workspace](../azure-monitor/logs/log-analytics-workspace-overview.md) is completed.| :::image type="content" source="./media/automation-hybrid-runbook-worker/hybrid-worker-group-platform.png" alt-text="Hybrid worker group showing platform field"::: @@ -47,7 +47,7 @@ There are two types of Runbook Workers - system and user. 
The following table de |**System** |Supports a set of hidden runbooks used by the Update Management feature that are designed to install user-specified updates on Windows and Linux machines.
    This type of Hybrid Runbook Worker isn't a member of a Hybrid Runbook Worker group, and therefore doesn't run runbooks that target a Runbook Worker group. | |**User** |Supports user-defined runbooks intended to run directly on the Windows and Linux machine that are members of one or more Runbook Worker groups. | -Agent-based (V1) Hybrid Runbook Workers rely on the [Log Analytics agent](../azure-monitor/agents/log-analytics-agent.md) reporting to an Azure Monitor [Log Analytics workspace](../azure-monitor/logs/design-logs-deployment.md). The workspace isn't only to collect monitoring data from the machine, but also to download the components required to install the agent-based Hybrid Runbook Worker. +Agent-based (V1) Hybrid Runbook Workers rely on the [Log Analytics agent](../azure-monitor/agents/log-analytics-agent.md) reporting to an Azure Monitor [Log Analytics workspace](../azure-monitor/logs/log-analytics-workspace-overview.md). The workspace isn't only to collect monitoring data from the machine, but also to download the components required to install the agent-based Hybrid Runbook Worker. When Azure Automation [Update Management](./update-management/overview.md) is enabled, any machine connected to your Log Analytics workspace is automatically configured as a system Hybrid Runbook Worker. To configure it as a user Windows Hybrid Runbook Worker, see [Deploy an agent-based Windows Hybrid Runbook Worker in Automation](automation-windows-hrw-install.md) and for Linux, see [Deploy an agent-based Linux Hybrid Runbook Worker in Automation](./automation-linux-hrw-install.md). diff --git a/articles/automation/automation-linux-hrw-install.md b/articles/automation/automation-linux-hrw-install.md index fb56541bb947..93759e885eaf 100644 --- a/articles/automation/automation-linux-hrw-install.md +++ b/articles/automation/automation-linux-hrw-install.md @@ -28,7 +28,7 @@ Before you start, make sure that you have the following. The Hybrid Runbook Worker role depends on an Azure Monitor Log Analytics workspace to install and configure the role. You can create it through [Azure Resource Manager](../azure-monitor/logs/resource-manager-workspace.md#create-a-log-analytics-workspace), through [PowerShell](../azure-monitor/logs/powershell-workspace-configuration.md?toc=%2fpowershell%2fmodule%2ftoc.json), or in the [Azure portal](../azure-monitor/logs/quick-create-workspace.md). -If you don't have an Azure Monitor Log Analytics workspace, review the [Azure Monitor Log design guidance](../azure-monitor/logs/design-logs-deployment.md) before you create the workspace. +If you don't have an Azure Monitor Log Analytics workspace, review the [Azure Monitor Log design guidance](../azure-monitor/logs/workspace-design.md) before you create the workspace. ### Log Analytics agent diff --git a/articles/automation/automation-runbook-types.md b/articles/automation/automation-runbook-types.md index 6fad31f9f13f..436978159d35 100644 --- a/articles/automation/automation-runbook-types.md +++ b/articles/automation/automation-runbook-types.md @@ -64,7 +64,7 @@ The same Azure sandbox and Hybrid Runbook Worker can execute **PowerShell 5.1** Ensure that you select the right Runtime Version for modules. 
-For example : if you are executing a runbook for a Sharepoint automation scenario in **Runtime version** *7.1 (preview)*, then import the module in **Runtime version** **7.1 (preview)**; if you are executing a runbook for a Sharepoint automation scenario in **Runtime version** **5.1**, then import the module in **Runtime version** *5.1*. In this case, you would see two entries for the module, one for **Runtime Version** **7.1(preview)** and other for **5.1**. +For example : if you are executing a runbook for a SharePoint automation scenario in **Runtime version** *7.1 (preview)*, then import the module in **Runtime version** **7.1 (preview)**; if you are executing a runbook for a SharePoint automation scenario in **Runtime version** **5.1**, then import the module in **Runtime version** *5.1*. In this case, you would see two entries for the module, one for **Runtime Version** **7.1(preview)** and other for **5.1**. :::image type="content" source="./media/automation-runbook-types/runbook-types.png" alt-text="runbook Types."::: diff --git a/articles/automation/automation-solution-vm-management.md b/articles/automation/automation-solution-vm-management.md index 7768e0e2bea7..a0e0ff4b4dcb 100644 --- a/articles/automation/automation-solution-vm-management.md +++ b/articles/automation/automation-solution-vm-management.md @@ -37,7 +37,7 @@ The following are limitations with the current feature: - The runbooks for the Start/Stop VMs during off hours feature work with an [Azure Run As account](./automation-security-overview.md#run-as-accounts). The Run As account is the preferred authentication method because it uses certificate authentication instead of a password that might expire or change frequently. -- An [Azure Monitor Log Analytics workspace](../azure-monitor/logs/design-logs-deployment.md) that stores the runbook job logs and job stream results in a workspace to query and analyze. The Automation account and Log Analytics workspace need to be in the same subscription and supported region. The workspace needs to already exist, you cannot create a new workspace during deployment of this feature. +- An [Azure Monitor Log Analytics workspace](../azure-monitor/logs/log-analytics-workspace-overview.md) that stores the runbook job logs and job stream results in a workspace to query and analyze. The Automation account and Log Analytics workspace need to be in the same subscription and supported region. The workspace needs to already exist, you cannot create a new workspace during deployment of this feature. We recommend that you use a separate Automation account for working with VMs enabled for the Start/Stop VMs during off-hours feature. Azure module versions are frequently upgraded, and their parameters might change. The feature isn't upgraded on the same cadence and it might not work with newer versions of the cmdlets that it uses. Before importing the updated modules into your production Automation account(s), we recommend you import them into a test Automation account to verify there aren't any compatibility issues. 
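To make that comparison easier, it helps to see which modules and versions are imported in each account before you promote an update. The following is a minimal sketch that lists modules through the Automation REST API with `az rest`; the account names are hypothetical placeholders and the `api-version` value is an assumption, so adjust both for your environment.

```azurecli-interactive
# Sketch: list module names and versions in a test and a production Automation account
# so they can be compared before promoting updated modules.
# Placeholders: <subscription-id>, <resource-group>, and the two account names.
# The api-version shown here is an assumption; use a version that is valid for your subscription.
SUBSCRIPTION_ID="<subscription-id>"
RESOURCE_GROUP="<resource-group>"

for ACCOUNT in "contoso-automation-test" "contoso-automation-prod"; do
  echo "Modules in $ACCOUNT:"
  az rest --method get \
    --url "https://management.azure.com/subscriptions/$SUBSCRIPTION_ID/resourceGroups/$RESOURCE_GROUP/providers/Microsoft.Automation/automationAccounts/$ACCOUNT/modules?api-version=2019-06-01" \
    --query "value[].{name:name, version:properties.version}" --output table
done
```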
diff --git a/articles/automation/automation-update-azure-modules.md index 827b74e32438..432e3a9b098a 100644 --- a/articles/automation/automation-update-azure-modules.md +++ b/articles/automation/automation-update-azure-modules.md @@ -28,7 +28,7 @@ If you develop your scripts locally, it's recommended to have the same module ve ## Update Az modules -You can update Az modules through the portal **(recommended)** or through the runbook. +The following sections explain how you can update Az modules, either through the **portal** (recommended) or through a runbook. ### Update Az modules through portal @@ -52,7 +52,14 @@ The Azure team will regularly update the module version and provide an option to ### Update Az modules through runbook -To update the Azure modules in your Automation account, you must use the [Update-AutomationAzureModulesForAccount](https://github.com/Microsoft/AzureAutomation-Account-Modules-Update) runbook, available as open source. To start using this runbook to update your Azure modules, download it from the GitHub repository. You can then import it into your Automation account or run it as a script. To learn how to import a runbook in your Automation account, see [Import a runbook](manage-runbooks.md#import-a-runbook). In case of any runbook failure, we recommend that you modify the parameters in the runbook according to your specific needs, as the runbook is available as open-source and provided as a reference. +To update the Azure modules in your Automation account: + +1. Use the [Update-AutomationAzureModulesForAccount](https://github.com/Microsoft/AzureAutomation-Account-Modules-Update) runbook, available as open source. +1. Download it from the GitHub repository to start using this runbook to update your Azure modules. +1. Import it into your Automation account or run it as a script. To learn how to import a runbook in your Automation account, see [Import a runbook](manage-runbooks.md#import-a-runbook). + +>[!NOTE] +> We recommend updating Az modules through the Azure portal. You can also use the `Update-AutomationAzureModulesForAccount` script, which is available as open source and provided as a reference. However, if the runbook fails, you need to modify its parameters as required or debug the script for your scenario. The **Update-AutomationAzureModulesForAccount** runbook supports updating the Azure, AzureRM, and Az modules by default. Review the [Update Azure modules runbook README](https://github.com/microsoft/AzureAutomation-Account-Modules-Update/blob/master/README.md) for more information on updating Az.Automation modules with this runbook. There are additional important factors that you need to take into account when using the Az modules in your Automation account. To learn more, see [Manage modules in Azure Automation](shared-resources/modules.md). diff --git a/articles/automation/automation-windows-hrw-install.md index 83f64ba1117c..bed2ea76a9af 100644 --- a/articles/automation/automation-windows-hrw-install.md +++ b/articles/automation/automation-windows-hrw-install.md @@ -28,7 +28,7 @@ Before you start, make sure that you have the following. The Hybrid Runbook Worker role depends on an Azure Monitor Log Analytics workspace to install and configure the role. 
You can create it through [Azure Resource Manager](../azure-monitor/logs/resource-manager-workspace.md#create-a-log-analytics-workspace), through [PowerShell](../azure-monitor/logs/powershell-workspace-configuration.md?toc=%2fpowershell%2fmodule%2ftoc.json), or in the [Azure portal](../azure-monitor/logs/quick-create-workspace.md). -If you don't have an Azure Monitor Log Analytics workspace, review the [Azure Monitor Log design guidance](../azure-monitor/logs/design-logs-deployment.md) before you create the workspace. +If you don't have an Azure Monitor Log Analytics workspace, review the [Azure Monitor Log design guidance](../azure-monitor/logs/workspace-design.md) before you create the workspace. ### Log Analytics agent diff --git a/articles/automation/change-tracking/enable-from-runbook.md b/articles/automation/change-tracking/enable-from-runbook.md index 188b0e568e87..248627f3a6a0 100644 --- a/articles/automation/change-tracking/enable-from-runbook.md +++ b/articles/automation/change-tracking/enable-from-runbook.md @@ -23,7 +23,7 @@ This method uses two runbooks: * Azure subscription. If you don't have one yet, you can [activate your MSDN subscriber benefits](https://azure.microsoft.com/pricing/member-offers/msdn-benefits-details/) or sign up for a [free account](https://azure.microsoft.com/free/?WT.mc_id=A261C142F). * [Automation account](../automation-security-overview.md) to manage machines. -* [Log Analytics workspace](../../azure-monitor/logs/design-logs-deployment.md) +* [Log Analytics workspace](../../azure-monitor/logs/log-analytics-workspace-overview.md) * A [virtual machine](../../virtual-machines/windows/quick-create-portal.md). * Two Automation assets, which are used by the **Enable-AutomationSolution** runbook. This runbook, if it doesn't already exist in your Automation account, is automatically imported by the **Enable-MultipleSolution** runbook during its first run. * *LASolutionSubscriptionId*: Subscription ID of where the Log Analytics workspace is located. diff --git a/articles/automation/disable-local-authentication.md b/articles/automation/disable-local-authentication.md index 9fb2c983ff7f..2743710a2130 100644 --- a/articles/automation/disable-local-authentication.md +++ b/articles/automation/disable-local-authentication.md @@ -20,7 +20,7 @@ Disabling local authentication doesn't take effect immediately. Allow a few minu >[!NOTE] > Currently, PowerShell support for the new API version (2021-06-22) or the flag – `DisableLocalAuth` is not available. However, you can use the Rest-API with this API version to update the flag. -To allow list and enroll your subscription for this feature in your respective regions, follow the steps in [how to create an Azure support request - Azure supportability | Microsoft Docs](/azure/azure-portal/supportability/how-to-create-azure-support-request). +To allow list and enroll your subscription for this feature in your respective regions, follow the steps in [how to create an Azure support request - Azure supportability | Microsoft Docs](../azure-portal/supportability/how-to-create-azure-support-request.md). ## Re-enable local authentication @@ -42,4 +42,4 @@ Update Management patching will not work when local authentication is disabled. 
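As a rough illustration of the REST API approach described earlier in this article, the following `az rest` sketch patches the Automation account with the `disableLocalAuth` property by using the 2021-06-22 API version. The subscription, resource group, and account names are placeholders, and the request body is a minimal example rather than a complete account definition.

```azurecli-interactive
# Sketch: disable local authentication on an Automation account through the REST API,
# since PowerShell support for the 2021-06-22 API version is not yet available.
# Replace the placeholder values before running.
az rest --method patch \
  --url "https://management.azure.com/subscriptions/<subscription-id>/resourceGroups/<resource-group>/providers/Microsoft.Automation/automationAccounts/<automation-account>?api-version=2021-06-22" \
  --body '{"properties": {"disableLocalAuth": true}}'
```

Setting the property to `false` with the same call re-enables local authentication.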
## Next steps -- [Azure Automation account authentication overview](./automation-security-overview.md) +- [Azure Automation account authentication overview](./automation-security-overview.md) \ No newline at end of file diff --git a/articles/automation/extension-based-hybrid-runbook-worker-install.md b/articles/automation/extension-based-hybrid-runbook-worker-install.md index 8f538dd27843..ea91259d254d 100644 --- a/articles/automation/extension-based-hybrid-runbook-worker-install.md +++ b/articles/automation/extension-based-hybrid-runbook-worker-install.md @@ -67,6 +67,20 @@ If you use a proxy server for communication between Azure Automation and machine > [!NOTE] > You can set up the proxy settings by PowerShell cmdlets or API. + To install the extension using cmdlets: + +1. Get the automation account details using the below API call. + + ```http + GET https://westcentralus.management.azure.com/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Automation/automationAccounts/{automationAccountName}?api-version=2021-06-22 + + ``` + + The API call will provide the value with the key: `AutomationHybridServiceUrl`. Use the URL in the next step to enable extension on the VM. + +1. Install the Hybrid Worker Extension on the VM by running the following PowerShell cmdlet (Required module: Az.Compute). Use the `properties.automationHybridServiceUrl` provided by the above API call + + **Proxy server settings** # [Windows](#tab/windows) @@ -82,6 +96,17 @@ $protectedsettings = @{ "ProxyPassword" = "password"; }; ``` +**Azure VMs** + +```powershell +Set-AzVMExtension -ResourceGroupName -Location -VMName -Name "HybridWorkerExtension" -Publisher "Microsoft.Azure.Automation.HybridWorker" -ExtensionType HybridWorkerForWindows -TypeHandlerVersion 0.1 -Settings $settings +``` + +**Azure Arc-enabled VMs** + +```powershell +New-AzConnectedMachineExtension -ResourceGroupName -Location -VMName -Name "HybridWorkerExtension" -Publisher "Microsoft.Azure.Automation.HybridWorker" -ExtensionType HybridWorkerForWindows -TypeHandlerVersion 0.1 -Settings $settings -NoWait +``` # [Linux](#tab/linux) @@ -93,6 +118,18 @@ $settings = @{ "AutomationAccountURL" = "/"; }; ``` +**Azure VMs** + +```powershell +Set-AzVMExtension -ResourceGroupName -Location -VMName -Name "HybridWorkerExtension" -Publisher "Microsoft.Azure.Automation.HybridWorker" -ExtensionType HybridWorkerForLinux -TypeHandlerVersion 0.1 -Settings $settings +``` + +**Azure Arc-enabled VMs** + +```powershell +New-AzConnectedMachineExtension -ResourceGroupName -Location -VMName -Name "HybridWorkerExtension" -Publisher "Microsoft.Azure.Automation.HybridWorker" -ExtensionType HybridWorkerForLinux -TypeHandlerVersion 0.1 -Settings $settings -NoWait +``` + --- ### Firewall use diff --git a/articles/automation/overview.md b/articles/automation/overview.md index b49b6729333c..06ff2f91fa8d 100644 --- a/articles/automation/overview.md +++ b/articles/automation/overview.md @@ -135,7 +135,7 @@ These Azure services can work with Automation job and runbook resources using an ## Pricing for Azure Automation -Process automation includes runbook jobs and watchers. Billing for jobs is based on the number of job run time minutes used in the month, and for watchers, it is on the number of hours used in a month. The charges for process automation are incurred whenever a [job](/azure/automation/start-runbooks) or [watcher](/azure/automation/automation-scenario-using-watcher-task) runs. +Process automation includes runbook jobs and watchers. 
Billing for jobs is based on the number of job run time minutes used in the month, and for watchers, it is on the number of hours used in a month. The charges for process automation are incurred whenever a [job](./start-runbooks.md) or [watcher](./automation-scenario-using-watcher-task.md) runs. You create Automation accounts with a Basic SKU, wherein the first 500 job run time minutes are free per subscription. You are billed only for minutes/hours that exceed the 500 mins free included units. You can review the prices associated with Azure Automation on the [pricing](https://azure.microsoft.com/pricing/details/automation/) page. @@ -143,4 +143,4 @@ You can review the prices associated with Azure Automation on the [pricing](http ## Next steps > [!div class="nextstepaction"] -> [Create an Automation account](./quickstarts/create-account-portal.md) +> [Create an Automation account](./quickstarts/create-account-portal.md) \ No newline at end of file diff --git a/articles/automation/quickstart-create-automation-account-template.md index 469f9d7c9991..5ef25b462e91 100644 --- a/articles/automation/quickstart-create-automation-account-template.md +++ b/articles/automation/quickstart-create-automation-account-template.md @@ -35,7 +35,7 @@ If you're new to Azure Automation and Azure Monitor, it's important that you und * Review [workspace mappings](how-to/region-mappings.md) to specify the supported regions inline or in a parameter file. Only certain regions are supported for linking a Log Analytics workspace and an Automation account in your subscription. -* If you're new to Azure Monitor Logs and haven't deployed a workspace already, review the [workspace design guidance](../azure-monitor/logs/design-logs-deployment.md). This document will help you learn about access control, and help you understand the recommended design implementation strategies for your organization. +* If you're new to Azure Monitor Logs and haven't deployed a workspace already, review the [workspace design guidance](../azure-monitor/logs/workspace-design.md). This document will help you learn about access control, and help you understand the recommended design implementation strategies for your organization. ## Review the template diff --git a/articles/automation/troubleshoot/update-agent-issues.md index e6f97785dead..35aefce6628d 100644 --- a/articles/automation/troubleshoot/update-agent-issues.md +++ b/articles/automation/troubleshoot/update-agent-issues.md @@ -44,7 +44,7 @@ Results are shown on the page when they're ready. The checks sections show what' ### Operating system -The operating system check verifies whether the Hybrid Runbook Worker is running [one of the supported operating systems.](/azure/automation/update-management/operating-system-requirements.md#windows-operating-system) +The operating system check verifies whether the Hybrid Runbook Worker is running [one of the supported operating systems](../update-management/operating-system-requirements.md). ### .NET 4.6.2 diff --git a/articles/automation/update-management/enable-from-runbook.md index bc2facd42771..109ad1e3b3bb 100644 --- a/articles/automation/update-management/enable-from-runbook.md +++ b/articles/automation/update-management/enable-from-runbook.md @@ -24,7 +24,7 @@ This method uses two runbooks: * Azure subscription. 
If you don't have one yet, you can [activate your MSDN subscriber benefits](https://azure.microsoft.com/pricing/member-offers/msdn-benefits-details/) or sign up for a [free account](https://azure.microsoft.com/free/?WT.mc_id=A261C142F). * [Automation account](../automation-security-overview.md) to manage machines. -* [Log Analytics workspace](../../azure-monitor/logs/design-logs-deployment.md) +* [Log Analytics workspace](../../azure-monitor/logs/log-analytics-workspace-overview.md) * A [virtual machine](../../virtual-machines/windows/quick-create-portal.md). * Two Automation assets, which are used by the **Enable-AutomationSolution** runbook. This runbook, if it doesn't already exist in your Automation account, is automatically imported by the **Enable-MultipleSolution** runbook during its first run. * *LASolutionSubscriptionId*: Subscription ID of where the Log Analytics workspace is located. diff --git a/articles/automation/update-management/enable-from-template.md b/articles/automation/update-management/enable-from-template.md index 959c210c6e8b..34b6ccb7d9ef 100644 --- a/articles/automation/update-management/enable-from-template.md +++ b/articles/automation/update-management/enable-from-template.md @@ -62,7 +62,7 @@ If you're new to Azure Automation and Azure Monitor, it's important that you und * Review [workspace mappings](../how-to/region-mappings.md) to specify the supported regions inline or in a parameter file. Only certain regions are supported for linking a Log Analytics workspace and an Automation account in your subscription. -* If you're new to Azure Monitor logs and have not deployed a workspace already, you should review the [workspace design guidance](../../azure-monitor/logs/design-logs-deployment.md). It will help you to learn about access control, and understand the design implementation strategies we recommend for your organization. +* If you're new to Azure Monitor logs and have not deployed a workspace already, you should review the [workspace design guidance](../../azure-monitor/logs/workspace-design.md). It will help you to learn about access control, and understand the design implementation strategies we recommend for your organization. ## Deploy template diff --git a/articles/automation/update-management/plan-deployment.md b/articles/automation/update-management/plan-deployment.md index e090c8085cd6..49f881e1d929 100644 --- a/articles/automation/update-management/plan-deployment.md +++ b/articles/automation/update-management/plan-deployment.md @@ -17,7 +17,7 @@ Update Management is an Azure Automation feature, and therefore requires an Auto Update Management depends on a Log Analytics workspace in Azure Monitor to store assessment and update status log data collected from managed machines. Integration with Log Analytics also enables detailed analysis and alerting in Azure Monitor. You can use an existing workspace in your subscription, or create a new one dedicated only for Update Management. -If you are new to Azure Monitor Logs and the Log Analytics workspace, you should review the [Design a Log Analytics workspace](../../azure-monitor/logs/design-logs-deployment.md) deployment guide. +If you are new to Azure Monitor Logs and the Log Analytics workspace, you should review the [Design a Log Analytics workspace](../../azure-monitor/logs/workspace-design.md) deployment guide. 
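If you decide to create a new workspace dedicated to Update Management, a minimal Azure CLI sketch looks like the following. The resource group, workspace name, and region are placeholders; choose a region that is supported for linking the workspace to your Automation account.

```azurecli-interactive
# Sketch: create a resource group and a Log Analytics workspace for Update Management.
# All names and the region below are placeholders - substitute your own values.
az group create --name <resource-group> --location <region>

az monitor log-analytics workspace create \
  --resource-group <resource-group> \
  --workspace-name <workspace-name> \
  --location <region>
```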
## Step 3 - Supported operating systems diff --git a/articles/availability-zones/az-overview.md b/articles/availability-zones/az-overview.md index 05c76a63d074..635babc224aa 100644 --- a/articles/availability-zones/az-overview.md +++ b/articles/availability-zones/az-overview.md @@ -42,21 +42,7 @@ Some organizations require high availability of availability zones and protectio ## Azure regions with availability zones -Azure provides the most extensive global footprint of any cloud provider and is rapidly opening new regions and availability zones. The following regions currently support availability zones. - -| Americas | Europe | Africa | Asia Pacific | -|--------------------|----------------------|---------------------|----------------| -| Brazil South | France Central | South Africa North | Australia East | -| Canada Central | Germany West Central | | Central India | -| Central US | North Europe | | Japan East | -| East US | Norway East | | Korea Central | -| East US 2 | UK South | | Southeast Asia | -| South Central US | West Europe | | East Asia | -| US Gov Virginia | Sweden Central | | China North 3 | -| West US 2 | Switzerland North* | | | -| West US 3 | | | | - -\* To learn more about Availability Zones and available services support in these regions, contact your Microsoft sales or customer representative. For the upcoming regions that will support Availability Zones, see [Azure geographies](https://azure.microsoft.com/global-infrastructure/geographies/). +[!INCLUDE [availability-zone-regions-include](./includes/availability-zone-regions-include.md)] ## Next steps diff --git a/articles/availability-zones/az-region.md b/articles/availability-zones/az-region.md index 17c86138ed81..5a19d9a8a420 100644 --- a/articles/availability-zones/az-region.md +++ b/articles/availability-zones/az-region.md @@ -19,21 +19,7 @@ Azure strives to enable high resiliency across every service and offering. Runni ## Azure regions with availability zones -Azure provides the most extensive global footprint of any cloud provider and is rapidly opening new regions and availability zones. The following regions currently support availability zones. - -| Americas | Europe | Africa | Asia Pacific | -|--------------------|----------------------|---------------------|----------------| -| Brazil South | France Central | South Africa North | Australia East | -| Canada Central | Germany West Central | | Central India | -| Central US | North Europe | | Japan East | -| East US | Norway East | | Korea Central | -| East US 2 | UK South | | Southeast Asia | -| South Central US | West Europe | | East Asia | -| US Gov Virginia | Sweden Central | | China North 3 | -| West US 2 | Switzerland North* | | | -| West US 3 | | | | - -\* To learn more about Availability Zones and available services support in these regions, contact your Microsoft sales or customer representative. For the upcoming regions that will support Availability Zones, see [Azure geographies](https://azure.microsoft.com/global-infrastructure/geographies/). +[!INCLUDE [availability-zone-regions-include](./includes/availability-zone-regions-include.md)] For a list of Azure services that support availability zones by Azure region, see the [availability zones documentation](az-overview.md). 
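In addition to the region list above, you can check zonal support for specific compute sizes in a region from the Azure CLI. The following is a small sketch; the region and size filter are examples only, not recommendations.

```azurecli-interactive
# Sketch: list VM SKUs in a region that can be deployed into availability zones.
# The region (eastus2) and size prefix (Standard_D) are example values.
az vm list-skus --location eastus2 --zone --resource-type virtualMachines \
  --size Standard_D --output table
```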
diff --git a/articles/availability-zones/includes/availability-zone-regions-include.md b/articles/availability-zones/includes/availability-zone-regions-include.md new file mode 100644 index 000000000000..f2a3a16d5228 --- /dev/null +++ b/articles/availability-zones/includes/availability-zone-regions-include.md @@ -0,0 +1,26 @@ +--- + title: include file + description: include file + author: awysza + ms.service: azure + ms.topic: include + ms.date: 05/18/2022 + ms.author: rarco + ms.custom: include file +--- + +Azure provides the most extensive global footprint of any cloud provider and is rapidly opening new regions and availability zones. The following regions currently support availability zones. + +| Americas | Europe | Africa | Asia Pacific | +|--------------------|----------------------|---------------------|----------------| +| Brazil South | France Central | South Africa North | Australia East | +| Canada Central | Germany West Central | | Central India | +| Central US | North Europe | | Japan East | +| East US | Norway East | | Korea Central | +| East US 2 | UK South | | Southeast Asia | +| South Central US | West Europe | | East Asia | +| US Gov Virginia | Sweden Central | | China North 3 | +| West US 2 | Switzerland North* | | | +| West US 3 | | | | + +\* To learn more about Availability Zones and available services support in these regions, contact your Microsoft sales or customer representative. For the upcoming regions that will support Availability Zones, see [Azure geographies](https://azure.microsoft.com/global-infrastructure/geographies/). \ No newline at end of file diff --git a/articles/azure-app-configuration/concept-github-action.md b/articles/azure-app-configuration/concept-github-action.md index 4b560432d1fd..00ac78407940 100644 --- a/articles/azure-app-configuration/concept-github-action.md +++ b/articles/azure-app-configuration/concept-github-action.md @@ -20,7 +20,7 @@ A GitHub Actions [workflow](https://docs.github.com/en/actions/learn-github-acti The GitHub [documentation](https://docs.github.com/en/actions/learn-github-actions/introduction-to-github-actions) provides in-depth view of GitHub workflows and actions. ## Enable GitHub Actions in your repository -To start using this GitHub action, go to your repository and select the **Actions** tab. Select **New workflow**, then **Set up a workflow yourself**. Finally, search the marketplace for “Azure App Configuration Sync.” +To start using this GitHub Action, go to your repository and select the **Actions** tab. Select **New workflow**, then **Set up a workflow yourself**. Finally, search the marketplace for “Azure App Configuration Sync.” > [!div class="mx-imgBorder"] > ![Select the Action tab](media/find-github-action.png) @@ -57,7 +57,7 @@ jobs: ``` ## Use strict sync -By default the GitHub action does not enable strict mode, meaning that the sync will only add key-values from the configuration file to the App Configuration instance (no key-value pairs will be deleted). Enabling strict mode will mean key-value pairs that aren't in the configuration file are deleted from the App Configuration instance, so that it matches the configuration file. If you are syncing from multiple sources or using Azure Key Vault with App Configuration, you'll want to use different prefixes or labels with strict sync to avoid wiping out configuration settings from other files (see samples below). 
+By default the GitHub Action does not enable strict mode, meaning that the sync will only add key-values from the configuration file to the App Configuration instance (no key-value pairs will be deleted). Enabling strict mode will mean key-value pairs that aren't in the configuration file are deleted from the App Configuration instance, so that it matches the configuration file. If you are syncing from multiple sources or using Azure Key Vault with App Configuration, you'll want to use different prefixes or labels with strict sync to avoid wiping out configuration settings from other files (see samples below). ```json on: diff --git a/articles/azure-arc/data/release-notes.md b/articles/azure-arc/data/release-notes.md index d0766a5a01d1..ff8c836f6753 100644 --- a/articles/azure-arc/data/release-notes.md +++ b/articles/azure-arc/data/release-notes.md @@ -189,7 +189,7 @@ For complete release version information, see [Version log](version-log.md). - Set `--readable-secondaries` to any value between 0 and the number of replicas minus 1. - `--readable-secondaries` only applies to Business Critical tier. - Automatic backups are taken on the primary instance in a Business Critical service tier when there are multiple replicas. When a failover happens, backups move to the new primary. -- [ReadWriteMany (RWX) capable storage class](/azure/aks/concepts-storage#azure-disks) is required for backups, for both General Purpose and Business Critical service tiers. Specifying a non-ReadWriteMany storage class will cause the SQL Managed Instance to be stuck in "Pending" status during deployment. +- [ReadWriteMany (RWX) capable storage class](../../aks/concepts-storage.md#azure-disks) is required for backups, for both General Purpose and Business Critical service tiers. Specifying a non-ReadWriteMany storage class will cause the SQL Managed Instance to be stuck in "Pending" status during deployment. - Billing support when using multiple read replicas. For additional information about service tiers, see [High Availability with Azure Arc-enabled SQL Managed Instance (preview)](managed-instance-high-availability.md). @@ -856,4 +856,4 @@ For instructions see [What are Azure Arc-enabled data services?](overview.md) - [Plan an Azure Arc-enabled data services deployment](plan-azure-arc-data-services.md) (requires installing the client tools first) - [Create an Azure SQL Managed Instance on Azure Arc](create-sql-managed-instance.md) (requires creation of an Azure Arc data controller first) - [Create an Azure Database for PostgreSQL Hyperscale server group on Azure Arc](create-postgresql-hyperscale-server-group.md) (requires creation of an Azure Arc data controller first) -- [Resource providers for Azure services](../../azure-resource-manager/management/azure-services-resource-providers.md) +- [Resource providers for Azure services](../../azure-resource-manager/management/azure-services-resource-providers.md) \ No newline at end of file diff --git a/articles/azure-arc/kubernetes/extensions.md b/articles/azure-arc/kubernetes/extensions.md index 537f9fc4e3af..e75bd2be8d13 100644 --- a/articles/azure-arc/kubernetes/extensions.md +++ b/articles/azure-arc/kubernetes/extensions.md @@ -65,7 +65,7 @@ A conceptual overview of this feature is available in [Cluster extensions - Azur | [Azure API Management on Azure Arc](../../api-management/how-to-deploy-self-hosted-gateway-azure-arc.md) | Deploy and manage API Management gateway on Azure Arc-enabled Kubernetes clusters. 
| | [Azure Arc-enabled Machine Learning](../../machine-learning/how-to-attach-kubernetes-anywhere.md) | Deploy and run Azure Machine Learning on Azure Arc-enabled Kubernetes clusters. | | [Flux (GitOps)](./conceptual-gitops-flux2.md) | Use GitOps with Flux to manage cluster configuration and application deployment. | -| [Dapr extension for Azure Kubernetes Service (AKS) and Arc-enabled Kubernetes](/azure/aks/dapr)| Eliminates the overhead of downloading Dapr tooling and manually installing and managing the runtime on your clusters. | +| [Dapr extension for Azure Kubernetes Service (AKS) and Arc-enabled Kubernetes](../../aks/dapr.md)| Eliminates the overhead of downloading Dapr tooling and manually installing and managing the runtime on your clusters. | ## Usage of cluster extensions @@ -280,4 +280,4 @@ Learn more about the cluster extensions currently available for Azure Arc-enable > [Event Grid on Kubernetes](../../event-grid/kubernetes/overview.md) > > [!div class="nextstepaction"] -> [Azure API Management on Azure Arc](../../api-management/how-to-deploy-self-hosted-gateway-azure-arc.md) +> [Azure API Management on Azure Arc](../../api-management/how-to-deploy-self-hosted-gateway-azure-arc.md) \ No newline at end of file diff --git a/articles/azure-arc/kubernetes/media/gitops/flux2-config-install.png b/articles/azure-arc/kubernetes/media/gitops/flux2-config-install.png index 3013179a76d7..e9028d24b81d 100644 Binary files a/articles/azure-arc/kubernetes/media/gitops/flux2-config-install.png and b/articles/azure-arc/kubernetes/media/gitops/flux2-config-install.png differ diff --git a/articles/azure-arc/kubernetes/media/gitops/flux2-extension-install-AKS.png b/articles/azure-arc/kubernetes/media/gitops/flux2-extension-install-AKS.png index c5e939933246..23f6c124879f 100644 Binary files a/articles/azure-arc/kubernetes/media/gitops/flux2-extension-install-AKS.png and b/articles/azure-arc/kubernetes/media/gitops/flux2-extension-install-AKS.png differ diff --git a/articles/azure-arc/kubernetes/media/gitops/flux2-extension-install-Arc.png b/articles/azure-arc/kubernetes/media/gitops/flux2-extension-install-Arc.png index ee33fad499b9..2463e5c7a7e8 100644 Binary files a/articles/azure-arc/kubernetes/media/gitops/flux2-extension-install-Arc.png and b/articles/azure-arc/kubernetes/media/gitops/flux2-extension-install-Arc.png differ diff --git a/articles/azure-arc/kubernetes/move-regions.md b/articles/azure-arc/kubernetes/move-regions.md new file mode 100644 index 000000000000..2e10437ce88c --- /dev/null +++ b/articles/azure-arc/kubernetes/move-regions.md @@ -0,0 +1,88 @@ +--- +title: "Move Arc-enabled Kubernetes clusters between regions" +services: azure-arc +ms.service: azure-arc +ms.date: 03/03/2021 +ms.topic: how-to +ms.custom: subject-moving-resources +author: anraghun +ms.author: anraghun +description: "Manually move your Azure Arc-enabled Kubernetes between regions" +keywords: "Kubernetes, Arc, Azure, K8s, containers, region, move" +#Customer intent: As a Kubernetes cluster administrator, I want to move my Arc-enabled Kubernetes cluster to another Azure region. +--- + +# Move Arc-enabled Kubernetes clusters across Azure regions + +This article describes how to move Arc-enabled Kubernetes clusters (or connected cluster resources) to a different Azure region. You might move your resources to another region for a number of reasons. 
For example, to take advantage of a new Azure region, to deploy features or services available in specific regions only, to meet internal policy and governance requirements, or in response to capacity planning requirements. + +## Prerequisites + +- Ensure that Azure Arc-enabled Kubernetes resource (Microsoft.Kubernetes/connectedClusters) is supported in the target region. +- Ensure that Azure Arc-enabled Kubernetes configuration (Microsoft.KubernetesConfiguration/SourceControlConfigurations, Microsoft.KubernetesConfiguration/Extensions, Microsoft.KubernetesConfiguration/FluxConfigurations) resources are supported in the target region. +- Ensure that the Arc-enabled services you've deployed on top are supported in the target region. +- Ensure you have network access to the api server of your underlying Kubernetes cluster. + +## Prepare + +Before you begin, it's important to understand what moving these resources mean. + +### Kubernetes configurations + +Source control configurations, Flux configurations and extensions are child resources to the connected cluster resource. In order to move these resources, you'll first need to move the parent connected cluster resource. + +### Connected cluster + +The connectedClusters resource is the ARM representation of your Kubernetes clusters outside of Azure (on-premises, another cloud, edge...). The underlying infrastructure lies in your environment and Arc provides a first-class representation of the cluster on Azure, by installing agents on your cluster. + +When it comes to "moving" your Arc connected cluster, it means deleting the ARM resource in the source region, cleaning up the agents on your cluster and re-onboarding your cluster again in the target region. + +## Move + +### Kubernetes configurations + +1. Do a LIST of all configuration resources in the source cluster (the cluster to be moved) and save the response body to be used as the request body when re-creating these resources. + - [Microsoft.KubernetesConfiguration/SourceControlConfigurations](/cli/azure/k8s-configuration?view=azure-cli-latest&preserve-view=true#az-k8sconfiguration-list) + - [Microsoft.KubernetesConfiguration/Extensions](/cli/azure/k8s-extension?view=azure-cli-latest&preserve-view=true#az-k8s-extension-list) + - [Microsoft.KubernetesConfiguration/FluxConfigurations](/cli/azure/k8s-configuration/flux?view=azure-cli-latest&preserve-view=true#az-k8s-configuration-flux-list) + > [!NOTE] + > LIST/GET of configuration resources **do not** return `ConfigurationProtectedSettings`. + > For such cases, the only option is to save the original request body and reuse them while creating the resources in the new region. +2. [Delete](./move-regions.md#kubernetes-configurations-3) the above configuration resources. +2. Ensure the Arc connected cluster is up and running in the new region. This is the target cluster. +3. Re-create each of the configuration resources obtained in the LIST command from the source cluster on the target cluster. + +### Connected cluster + +1. [Delete](./move-regions.md#connected-cluster-3) the previous Arc deployment from the underlying Kubernetes cluster. +2. With network access to the underlying Kubernetes cluster, run [this command](./quickstart-connect-cluster.md?tabs=azure-cli#connect-an-existing-kubernetes-cluster) to create the Arc connected cluster in the new region. +> [!NOTE] +> The above command creates the cluster by default in the same location as its resource group. +> Use the `--location` parameter to explicitly provide the target region value. 
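Taken together, the connected cluster move comes down to two CLI calls, sketched below. The cluster and resource group names are placeholders, and the commands assume you have network access and a valid kubeconfig context for the underlying Kubernetes cluster, as noted in the prerequisites.

```azurecli-interactive
# Sketch: remove the Arc connection created in the source region, then connect the
# same underlying cluster again in the target region.
# <cluster-name>, <source-resource-group>, <target-resource-group>, and <target-region>
# are placeholders.
az connectedk8s delete --name <cluster-name> --resource-group <source-resource-group> --yes

az connectedk8s connect --name <cluster-name> \
  --resource-group <target-resource-group> \
  --location <target-region>
```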
+ +## Verify + +### Kubernetes configurations + +Do a LIST of all configuration resources in the target cluster. This should match the LIST response from the source cluster. + +### Connected cluster + +1. Run `az connectedk8s show -n -g ` and ensure the `connectivityStatus` value is `Connected`. +2. Run [this command](./quickstart-connect-cluster.md?tabs=azure-cli#view-azure-arc-agents-for-kubernetes) to verify all Arc agents are successfully deployed on the underlying cluster. + +## Clean up source resources + +### Kubernetes configurations + +Delete each of the configuration resources returned in the LIST command in the source cluster: +- [Microsoft.KubernetesConfiguration/SourceControlConfigurations](/cli/azure/k8s-configuration?view=azure-cli-latest&preserve-view=true#az-k8s-configuration-delete) +- [Microsoft.KubernetesConfiguration/Extensions](/cli/azure/k8s-extension?view=azure-cli-latest&preserve-view=true#az-k8s-extension-delete) +- [Microsoft.KubernetesConfiguration/FluxConfigurations](/cli/azure/k8s-configuration/flux?view=azure-cli-latest&preserve-view=true#az-k8s-configuration-flux-delete) + +> [!NOTE] +> This step may be skipped if the parent Arc connected cluster is also being deleted. Doing so would automatically remove the configuration resources on top. + +### Connected cluster + +With network access to the underlying Kubernetes cluster, run [this command](./quickstart-connect-cluster.md?tabs=azure-cli#clean-up-resources) to delete the Arc connected cluster. This command will clean up the Arc footprint on the underlying cluster as well as on ARM. \ No newline at end of file diff --git a/articles/azure-arc/kubernetes/toc.yml b/articles/azure-arc/kubernetes/toc.yml index 69fee039d14f..4a8d42e6761b 100644 --- a/articles/azure-arc/kubernetes/toc.yml +++ b/articles/azure-arc/kubernetes/toc.yml @@ -105,6 +105,8 @@ href: custom-locations.md - name: Azure Arc-enabled Machine Learning href: ../../machine-learning/how-to-attach-kubernetes-anywhere.md?toc=/azure/azure-arc/kubernetes/toc.json&bc=/azure/azure-arc/kubernetes/breadcrumb/toc.json + - name: Move between regions + href: move-regions.md - name: Troubleshooting href: troubleshooting.md - name: Reference diff --git a/articles/azure-arc/kubernetes/tutorial-akv-secrets-provider.md b/articles/azure-arc/kubernetes/tutorial-akv-secrets-provider.md index 07b3f54cc699..26db327d28a6 100644 --- a/articles/azure-arc/kubernetes/tutorial-akv-secrets-provider.md +++ b/articles/azure-arc/kubernetes/tutorial-akv-secrets-provider.md @@ -1,69 +1,75 @@ --- -title: Azure Key Vault Secrets Provider extension -description: Tutorial for setting up Azure Key Vault provider for Secrets Store CSI Driver interface as an extension on Azure Arc enabled Kubernetes cluster +title: Use Azure Key Vault Secrets Provider extension to fetch secrets into Azure Arc-enabled Kubernetes clusters +description: Learn how to set up the Azure Key Vault Provider for Secrets Store CSI Driver interface as an extension on Azure Arc enabled Kubernetes cluster services: azure-arc ms.service: azure-arc -ms.date: 5/13/2022 +ms.date: 5/26/2022 ms.topic: article author: mayurigupta13 ms.author: mayg --- -# Using Azure Key Vault Secrets Provider extension to fetch secrets into Arc clusters +# Use the Azure Key Vault Secrets Provider extension to fetch secrets into Azure Arc-enabled Kubernetes clusters -The Azure Key Vault Provider for Secrets Store CSI Driver allows for the integration of Azure Key Vault as a secrets store with a Kubernetes cluster via a [CSI 
volume](https://kubernetes-csi.github.io/docs/). +The Azure Key Vault Provider for Secrets Store CSI Driver allows for the integration of Azure Key Vault as a secrets store with a Kubernetes cluster via a [CSI volume](https://kubernetes-csi.github.io/docs/). For Azure Arc-enabled Kubernetes clusters, you can install the Azure Key Vault Secrets Provider extension to fetch secrets. -## Prerequisites -1. Ensure you have met all the common prerequisites for cluster extensions listed [here](extensions.md#prerequisites). -2. Use az k8s-extension CLI version >= v0.4.0 - -### Support limitations for Azure Key Vault (AKV) secrets provider extension -- Following Kubernetes distributions are currently supported - - Cluster API Azure - - Azure Kubernetes Service on Azure Stack HCI (AKS-HCI) - - Google Kubernetes Engine - - OpenShift Kubernetes Distribution - - Canonical Kubernetes Distribution - - Elastic Kubernetes Service - - Tanzu Kubernetes Grid - - -## Features +Benefits of the Azure Key Vault Secrets Provider extension include the folllowing: - Mounts secrets/keys/certs to pod using a CSI Inline volume - Supports pod portability with the SecretProviderClass CRD - Supports Linux and Windows containers - Supports sync with Kubernetes Secrets - Supports auto rotation of secrets +- Extension components are deployed to availability zones, making them zone redundant +## Prerequisites -## Install AKV secrets provider extension on an Arc enabled Kubernetes cluster +- A cluster with a supported Kubernetes distribution that has already been [connected to Azure Arc](quickstart-connect-cluster.md). The following Kubernetes distributions are currently supported for this scenario: + - Cluster API Azure + - Azure Kubernetes Service on Azure Stack HCI (AKS-HCI) + - Google Kubernetes Engine + - OpenShift Kubernetes Distribution + - Canonical Kubernetes Distribution + - Elastic Kubernetes Service + - Tanzu Kubernetes Grid +- Ensure you have met the [general prerequisites for cluster extensions](extensions.md#prerequisites). You must use version 0.4.0 or newer of the `k8s-extension` Azure CLI extension. -The following steps assume that you already have a cluster with supported Kubernetes distribution connected to Azure Arc. +## Install the Azure Key Vault Secrets Provider extension on an Arc-enabled Kubernetes cluster -To deploy using Azure portal, go to the cluster's **Extensions** blade under **Settings**. Click on **+Add** button. +You can install the Azure Key Vault Secrets Provider extension on your connected cluster in the Azure portal, by using Azure CLI, or by deploying ARM template. -[![Extensions located under Settings for Arc enabled Kubernetes cluster](media/tutorial-akv-secrets-provider/extension-install-add-button.jpg)](media/tutorial-akv-secrets-provider/extension-install-add-button.jpg#lightbox) +> [!TIP] +> Only one instance of the extension can be deployed on each Azure Arc-enabled Kubernetes cluster. -From the list of available extensions, select the **Azure Key Vault Secrets Provider** to deploy the latest version of the extension. You can also choose to customize the installation through the portal by changing the defaults on **Configuration** tab. +### Azure portal -[![AKV Secrets Provider available as an extension by clicking on Add button on Extensions blade](media/tutorial-akv-secrets-provider/extension-install-new-resource.jpg)](media/tutorial-akv-secrets-provider/extension-install-new-resource.jpg#lightbox) +1. 
In the [Azure portal](https://portal/azure.com), navigate to **Kubernetes - Azure Arc** and select your cluster. +1. Select **Extensions** (under **Settings**), and then select **+ Add**. -Alternatively, you can use the CLI experience captured below. + [![Screenshot showing the Extensions page for an Arc-enabled Kubernetes cluster in the Azure portal.](media/tutorial-akv-secrets-provider/extension-install-add-button.jpg)](media/tutorial-akv-secrets-provider/extension-install-add-button.jpg#lightbox) -Set the environment variables: -```azurecli-interactive -export CLUSTER_NAME= -export RESOURCE_GROUP= -``` +1. From the list of available extensions, select **Azure Key Vault Secrets Provider** to deploy the latest version of the extension. -```azurecli-interactive -az k8s-extension create --cluster-name $CLUSTER_NAME --resource-group $RESOURCE_GROUP --cluster-type connectedClusters --extension-type Microsoft.AzureKeyVaultSecretsProvider --name akvsecretsprovider -``` + [![Screenshot of the Azure Key Vault Secrets Provider extension in the Azure portal.](media/tutorial-akv-secrets-provider/extension-install-new-resource.jpg)](media/tutorial-akv-secrets-provider/extension-install-new-resource.jpg) + +1. Follow the prompts to deploy the extension. If needed, you can customize the installation by changing the default options on the **Configuration** tab. + +### Azure CLI + +1. Set the environment variables: + + ```azurecli-interactive + export CLUSTER_NAME= + export RESOURCE_GROUP= + ``` + +2. Install the Secrets Store CSI Driver and the Azure Key Vault Secrets Provider extension by running the following command: -The above will install the Secrets Store CSI Driver and the Azure Key Vault Provider on your cluster nodes. You should see output similar to the output shown below. It may take 3-5 minutes for the actual AKV secrets provider helm chart to get deployed to the cluster. + ```azurecli-interactive + az k8s-extension create --cluster-name $CLUSTER_NAME --resource-group $RESOURCE_GROUP --cluster-type connectedClusters --extension-type Microsoft.AzureKeyVaultSecretsProvider --name akvsecretsprovider + ``` -Note that only one instance of AKV secrets provider extension can be deployed on an Arc connected Kubernetes cluster. +You should see output similar to the example below. Note that it may take several minutes before the secrets provider Helm chart is deployed to the cluster. ```json { @@ -106,88 +112,93 @@ Note that only one instance of AKV secrets provider extension can be deployed on } ``` -### Install AKV secrets provider extension using ARM template -After connecting your cluster to Azure Arc, create a json file with the following format, making sure to update the \ value: - -```json -{ - "$schema": "https://schema.management.azure.com/schemas/2019-04-01/deploymentTemplate.json#", - "contentVersion": "1.0.0.0", - "parameters": { - "ConnectedClusterName": { - "defaultValue": "", - "type": "String", - "metadata": { - "description": "The Connected Cluster name." - } - }, - "ExtensionInstanceName": { - "defaultValue": "akvsecretsprovider", - "type": "String", - "metadata": { - "description": "The extension instance name." - } - }, - "ExtensionVersion": { - "defaultValue": "", - "type": "String", - "metadata": { - "description": "The version of the extension type." - } - }, - "ExtensionType": { - "defaultValue": "Microsoft.AzureKeyVaultSecretsProvider", - "type": "String", - "metadata": { - "description": "The extension type." 
- } - }, - "ReleaseTrain": { - "defaultValue": "stable", - "type": "String", - "metadata": { - "description": "The release train." - } - } - }, - "functions": [], - "resources": [ - { - "type": "Microsoft.KubernetesConfiguration/extensions", - "apiVersion": "2021-09-01", - "name": "[parameters('ExtensionInstanceName')]", - "properties": { - "extensionType": "[parameters('ExtensionType')]", - "releaseTrain": "[parameters('ReleaseTrain')]", - "version": "[parameters('ExtensionVersion')]" - }, - "scope": "[concat('Microsoft.Kubernetes/connectedClusters/', parameters('ConnectedClusterName'))]" - } - ] -} -``` -Now set the environment variables: -```azurecli-interactive -export TEMPLATE_FILE_NAME= -export DEPLOYMENT_NAME= -``` - -Finally, run this command to install the AKV secrets provider extension through az CLI: - -```azurecli-interactive -az deployment group create --name $DEPLOYMENT_NAME --resource-group $RESOURCE_GROUP --template-file $TEMPLATE_FILE_NAME -``` -Now, you should be able to view the AKV provider resources and use the extension in your cluster. +### ARM template + +1. Create a .json file using the following format. Be sure to update the \ value to refer to your cluster. + + ```json + { + "$schema": "https://schema.management.azure.com/schemas/2019-04-01/deploymentTemplate.json#", + "contentVersion": "1.0.0.0", + "parameters": { + "ConnectedClusterName": { + "defaultValue": "", + "type": "String", + "metadata": { + "description": "The Connected Cluster name." + } + }, + "ExtensionInstanceName": { + "defaultValue": "akvsecretsprovider", + "type": "String", + "metadata": { + "description": "The extension instance name." + } + }, + "ExtensionVersion": { + "defaultValue": "", + "type": "String", + "metadata": { + "description": "The version of the extension type." + } + }, + "ExtensionType": { + "defaultValue": "Microsoft.AzureKeyVaultSecretsProvider", + "type": "String", + "metadata": { + "description": "The extension type." + } + }, + "ReleaseTrain": { + "defaultValue": "stable", + "type": "String", + "metadata": { + "description": "The release train." + } + } + }, + "functions": [], + "resources": [ + { + "type": "Microsoft.KubernetesConfiguration/extensions", + "apiVersion": "2021-09-01", + "name": "[parameters('ExtensionInstanceName')]", + "properties": { + "extensionType": "[parameters('ExtensionType')]", + "releaseTrain": "[parameters('ReleaseTrain')]", + "version": "[parameters('ExtensionVersion')]" + }, + "scope": "[concat('Microsoft.Kubernetes/connectedClusters/', parameters('ConnectedClusterName'))]" + } + ] + } + ``` + +1. Now set the environment variables by using the following Azure CLI command: + + ```azurecli-interactive + export TEMPLATE_FILE_NAME= + export DEPLOYMENT_NAME= + ``` + +1. Finally, run this Azure CLI command to install the Azure Key Vault Secrets Provider extension: + + ```azurecli-interactive + az deployment group create --name $DEPLOYMENT_NAME --resource-group $RESOURCE_GROUP --template-file $TEMPLATE_FILE_NAME + ``` + +You should now be able to view the secret provider resources and use the extension in your cluster. ## Validate the extension installation -Run the following command. +To confirm successful installation of the Azure Key Vault Secrets Provider extension, run the following command. 
 ```azurecli-interactive
 az k8s-extension show --cluster-type connectedClusters --cluster-name $CLUSTER_NAME --resource-group $RESOURCE_GROUP --name akvsecretsprovider
 ```
 
-You should see a JSON output similar to the output below:
+You should see output similar to the example below.
+
 ```json
 {
   "aksAssignedIdentity": null,
@@ -229,113 +240,120 @@ You should see a JSON output similar to the output below:
   }
 }
 
-## Create or use an existing Azure Key Vault
+## Create or select an Azure Key Vault
+
+Next, specify the Azure Key Vault to use with your connected cluster. Start by setting the following environment variables:
 
-Set the environment variables:
 ```azurecli-interactive
 export AKV_RESOURCE_GROUP=
 export AZUREKEYVAULT_NAME=
 export AZUREKEYVAULT_LOCATION=
 ```
 
+If you don't already have a Key Vault, create one by using the following command. Keep in mind that the name of your Key Vault must be globally unique.
+
+```azurecli
+az keyvault create -n $AZUREKEYVAULT_NAME -g $AKV_RESOURCE_GROUP -l $AZUREKEYVAULT_LOCATION
+```
+
-You will need an Azure Key Vault resource containing the secret content. Keep in mind that the Key Vault's name must be globally unique.
-
-```azurecli
-az keyvault create -n $AZUREKEYVAULT_NAME -g $AKV_RESOURCE_GROUP -l $AZUREKEYVAULT_LOCATION
-```
-
-Azure Key Vault can store keys, secrets, and certificates. In this example, we'll set a plain text secret called `DemoSecret`:
+Azure Key Vault can store keys, secrets, and certificates. For this example, you can set a plain text secret called `DemoSecret` by using the following command:
 
 ```azurecli
 az keyvault secret set --vault-name $AZUREKEYVAULT_NAME -n DemoSecret --value MyExampleSecret
 ```
 
-Take note of the following properties for use in the next section:
+Before you move on to the next section, take note of the following properties:
 
-- Name of secret object in Key Vault
+- Name of the secret object in Key Vault
 - Object type (secret, key, or certificate)
-- Name of your Azure Key Vault resource
-- Azure Tenant ID the Subscription belongs to
+- Name of your Key Vault resource
+- The Azure Tenant ID for the subscription to which the Key Vault belongs
 
 ## Provide identity to access Azure Key Vault
 
-The Secrets Store CSI Driver on Arc connected clusters currently allows for the following methods to access an Azure Key Vault instance:
-- Service Principal
-
-Follow the steps below to provide identity to access Azure Key Vault
+Currently, the Secrets Store CSI Driver on Arc-enabled clusters can access an Azure Key Vault instance only through a service principal. Follow the steps below to provide an identity that can access your Key Vault.
 
 1. Follow the steps [here](../../active-directory/develop/howto-create-service-principal-portal.md#register-an-application-with-azure-ad-and-create-a-service-principal) to create a service principal in Azure. Take note of the Client ID and Client Secret generated in this step.
-2. Provide Azure Key Vault GET permission to the created service principal by following the steps [here](../../key-vault/general/assign-access-policy.md).
-3. Use the client ID and Client Secret from step 1 to create a Kubernetes secret on the Arc connected cluster:
-```bash
-kubectl create secret generic secrets-store-creds --from-literal clientid="" --from-literal clientsecret=""
-```
-4. Label the created secret:
-```bash
-kubectl label secret secrets-store-creds secrets-store.csi.k8s.io/used=true
-```
-5. 
Create a SecretProviderClass with the following YAML, filling in your values for key vault name, tenant ID, and objects to retrieve from your AKV instance: -```yml -# This is a SecretProviderClass example using service principal to access Keyvault -apiVersion: secrets-store.csi.x-k8s.io/v1 -kind: SecretProviderClass -metadata: - name: akvprovider-demo -spec: - provider: azure - parameters: - usePodIdentity: "false" - keyvaultName: - objects: | - array: - - | - objectName: DemoSecret - objectType: secret # object types: secret, key or cert - objectVersion: "" # [OPTIONAL] object versions, default to latest if empty - tenantId: # The tenant ID of the Azure Key Vault instance -``` -6. Apply the SecretProviderClass to your cluster: - -```bash -kubectl apply -f secretproviderclass.yaml -``` -7. Create a pod with the following YAML, filling in the name of your identity: - -```yml -# This is a sample pod definition for using SecretProviderClass and service principal to access Keyvault -kind: Pod -apiVersion: v1 -metadata: - name: busybox-secrets-store-inline -spec: - containers: - - name: busybox - image: k8s.gcr.io/e2e-test-images/busybox:1.29 - command: - - "/bin/sleep" - - "10000" - volumeMounts: - - name: secrets-store-inline - mountPath: "/mnt/secrets-store" - readOnly: true - volumes: - - name: secrets-store-inline - csi: - driver: secrets-store.csi.k8s.io - readOnly: true - volumeAttributes: - secretProviderClass: "akvprovider-demo" - nodePublishSecretRef: - name: secrets-store-creds -``` -8. Apply the pod to your cluster: - -```bash -kubectl apply -f pod.yaml -``` +1. Provide Azure Key Vault GET permission to the created service principal by following the steps [here](../../key-vault/general/assign-access-policy.md). +1. Use the client ID and Client Secret from step 1 to create a Kubernetes secret on the Arc connected cluster: + + ```bash + kubectl create secret generic secrets-store-creds --from-literal clientid="" --from-literal clientsecret="" + ``` + +1. Label the created secret: + + ```bash + kubectl label secret secrets-store-creds secrets-store.csi.k8s.io/used=true + ``` + +1. Create a SecretProviderClass with the following YAML, filling in your values for key vault name, tenant ID, and objects to retrieve from your AKV instance: + + ```yml + # This is a SecretProviderClass example using service principal to access Keyvault + apiVersion: secrets-store.csi.x-k8s.io/v1 + kind: SecretProviderClass + metadata: + name: akvprovider-demo + spec: + provider: azure + parameters: + usePodIdentity: "false" + keyvaultName: + objects: | + array: + - | + objectName: DemoSecret + objectType: secret # object types: secret, key or cert + objectVersion: "" # [OPTIONAL] object versions, default to latest if empty + tenantId: # The tenant ID of the Azure Key Vault instance + ``` + +1. Apply the SecretProviderClass to your cluster: + + ```bash + kubectl apply -f secretproviderclass.yaml + ``` + +1. 
Create a pod with the following YAML, filling in the name of your identity: + + ```yml + # This is a sample pod definition for using SecretProviderClass and service principal to access Keyvault + kind: Pod + apiVersion: v1 + metadata: + name: busybox-secrets-store-inline + spec: + containers: + - name: busybox + image: k8s.gcr.io/e2e-test-images/busybox:1.29 + command: + - "/bin/sleep" + - "10000" + volumeMounts: + - name: secrets-store-inline + mountPath: "/mnt/secrets-store" + readOnly: true + volumes: + - name: secrets-store-inline + csi: + driver: secrets-store.csi.k8s.io + readOnly: true + volumeAttributes: + secretProviderClass: "akvprovider-demo" + nodePublishSecretRef: + name: secrets-store-creds + ``` + +1. Apply the pod to your cluster: + + ```bash + kubectl apply -f pod.yaml + ``` ## Validate the secrets + After the pod starts, the mounted content at the volume path specified in your deployment YAML is available. + ```Bash ## show secrets held in secrets-store kubectl exec busybox-secrets-store-inline -- ls /mnt/secrets-store/ @@ -345,53 +363,53 @@ kubectl exec busybox-secrets-store-inline -- cat /mnt/secrets-store/DemoSecret ``` ## Additional configuration options -Following configuration settings are available for Azure Key Vault secrets provider extension: + +The following configuration settings are available for the Azure Key Vault Secrets Provider extension: | Configuration Setting | Default | Description | | --------- | ----------- | ----------- | -| enableSecretRotation | false | Boolean type; Periodically update the pod mount and Kubernetes Secret with the latest content from external secrets store | -| rotationPollInterval | 2m | Secret rotation poll interval duration if `enableSecretRotation` is `true`. This can be tuned based on how frequently the mounted contents for all pods and Kubernetes secrets need to be resynced to the latest | -| syncSecret.enabled | false | Boolean input; In some cases, you may want to create a Kubernetes Secret to mirror the mounted content. This configuration setting allows SecretProviderClass to allow secretObjects field to define the desired state of the synced Kubernetes secret objects | +| enableSecretRotation | false | Boolean type. If `true`, periodically updates the pod mount and Kubernetes Secret with the latest content from external secrets store | +| rotationPollInterval | 2m | Specifies the secret rotation poll interval duration if `enableSecretRotation` is `true`. This duration can be adjusted based on how frequently the mounted contents for all pods and Kubernetes secrets need to be resynced to the latest. | +| syncSecret.enabled | false | Boolean input. In some cases, you may want to create a Kubernetes Secret to mirror the mounted content. If `true`, `SecretProviderClass` allows the `secretObjects` field to define the desired state of the synced Kubernetes Secret objects. | -These settings can be changed either at the time of extension installation using `az k8s-extension create` command or post installation using `az k8s-extension update` command. 
+These settings can be specified when the extension is installed by using the `az k8s-extension create` command:
 
-Use following command to add configuration settings while creating extension instance:
 ```azurecli-interactive
 az k8s-extension create --cluster-name $CLUSTER_NAME --resource-group $RESOURCE_GROUP --cluster-type connectedClusters --extension-type Microsoft.AzureKeyVaultSecretsProvider --name akvsecretsprovider --configuration-settings secrets-store-csi-driver.enableSecretRotation=true secrets-store-csi-driver.rotationPollInterval=3m secrets-store-csi-driver.syncSecret.enabled=true
 ```
 
-Use following command to update configuration settings of existing extension instance:
+You can also change the settings after installation by using the `az k8s-extension update` command:
+
 ```azurecli-interactive
 az k8s-extension update --cluster-name $CLUSTER_NAME --resource-group $RESOURCE_GROUP --cluster-type connectedClusters --name akvsecretsprovider --configuration-settings secrets-store-csi-driver.enableSecretRotation=true secrets-store-csi-driver.rotationPollInterval=3m secrets-store-csi-driver.syncSecret.enabled=true
 ```
 
-## Uninstall Azure Key Vault secrets provider extension
-Use the below command:
+## Uninstall the Azure Key Vault Secrets Provider extension
+
+To uninstall the extension, run the following command:
+
 ```azurecli-interactive
 az k8s-extension delete --cluster-type connectedClusters --cluster-name $CLUSTER_NAME --resource-group $RESOURCE_GROUP --name akvsecretsprovider
 ```
-Note that the uninstallation does not delete the CRDs that are created at the time of extension installation.
-Verify that the extension instance has been deleted.
+> [!NOTE]
+> Uninstalling the extension doesn't delete the Custom Resource Definitions (CRDs) that were created when the extension was installed.
+
+To confirm that the extension instance has been deleted, run the following command:
+
 ```azurecli-interactive
 az k8s-extension list --cluster-type connectedClusters --cluster-name $CLUSTER_NAME --resource-group $RESOURCE_GROUP
 ```
-This output should not include AKV secrets provider. If you don't have any other extensions installed on your cluster, it will just be an empty array.
-
-## Reconciliation and Troubleshooting
-Azure Key Vault secrets provider extension is self-healing. All extension components that are deployed on the cluster at the time of extension installation are reconciled to their original state in case somebody tries to intentionally or unintentionally change or delete them. The only exception to that is CRDs. In case the CRDs are deleted, they are not reconciled. You can bring them back by using the 'az k8s-exstension create' command again and providing the existing extension instance name.
-
-Some common issues and troubleshooting steps for Azure Key Vault secrets provider are captured in the open source documentation [here](https://azure.github.io/secrets-store-csi-driver-provider-azure/docs/troubleshooting/) for your reference.
-Additional troubleshooting steps that are specific to the Secrets Store CSI Driver Interface can be referenced [here](https://secrets-store-csi-driver.sigs.k8s.io/troubleshooting.html).
+If the extension was successfully removed, you won't see the Azure Key Vault Secrets Provider extension listed in the output. If you don't have any other extensions installed on your cluster, you'll see an empty array.
 
-## Frequently asked questions
+## Reconciliation and troubleshooting
 
-### Is the extension of Azure Key Vault Secrets Provider zone redundant? 
+The Azure Key Vault Secrets Provider extension is self-healing. If somebody tries to change or delete an extension component that was deployed when the extension was installed, that component will be reconciled to its original state. The only exceptions are for Custom Resource Definitions (CRDs). If CRDs are deleted, they won't be reconciled. To restore deleted CRDs, use the `az k8s-extension create` command again with the existing extension instance name.
 
-Yes, all components of Azure Key Vault Secrets Provider are deployed on availability zones and are hence zone redundant.
+For more information about resolving common issues, see the open source troubleshooting guides for [Azure Key Vault provider for Secrets Store CSI driver](https://azure.github.io/secrets-store-csi-driver-provider-azure/docs/troubleshooting/) and [Secrets Store CSI Driver](https://secrets-store-csi-driver.sigs.k8s.io/troubleshooting.html).
 
 ## Next steps
 
-> **Just want to try things out?**
-> Get started quickly with an [Azure Arc Jumpstart scenario](https://aka.ms/arc-jumpstart-akv-secrets-provider) using Cluster API.
+- Want to try things out? Get started quickly with an [Azure Arc Jumpstart scenario](https://aka.ms/arc-jumpstart-akv-secrets-provider) using Cluster API.
+- Learn more about [Azure Key Vault](/azure/key-vault/general/overview).
diff --git a/articles/azure-arc/kubernetes/tutorial-arc-enabled-open-service-mesh.md b/articles/azure-arc/kubernetes/tutorial-arc-enabled-open-service-mesh.md
index 6a4f826cbd77..da55c04868fb 100644
--- a/articles/azure-arc/kubernetes/tutorial-arc-enabled-open-service-mesh.md
+++ b/articles/azure-arc/kubernetes/tutorial-arc-enabled-open-service-mesh.md
@@ -2,7 +2,7 @@
 title: Azure Arc-enabled Open Service Mesh
 description: Open Service Mesh (OSM) extension on Azure Arc-enabled Kubernetes cluster
 ms.service: azure-arc
-ms.date: 05/02/2022
+ms.date: 05/25/2022
 ms.topic: article
 author: mayurigupta13
 ms.author: mayg
@@ -26,7 +26,7 @@ Azure Arc-enabled Open Service Mesh can be deployed through Azure portal, Azure
 ### Current support limitations
 - Only one instance of Open Service Mesh can be deployed on an Azure Arc-connected Kubernetes cluster.
-- Support is available for Azure Arc-enabled Open Service Mesh version v1.0.0-1 and above. Find the latest version [here](https://github.com/Azure/osm-azure/releases). Supported release versions are appended with notes. Ignore the tags associated with intermediate releases.
+- Support is available for the two most recently released minor versions of Arc-enabled Open Service Mesh. Find the latest version [here](https://github.com/Azure/osm-azure/releases). Supported release versions are appended with notes. Ignore the tags associated with intermediate releases.
- The following Kubernetes distributions are currently supported: - AKS Engine - AKS on HCI diff --git a/articles/azure-arc/servers/index.yml b/articles/azure-arc/servers/index.yml index 520c7201a941..20a2eb8ab319 100644 --- a/articles/azure-arc/servers/index.yml +++ b/articles/azure-arc/servers/index.yml @@ -100,4 +100,4 @@ landingContent: - text: Microsoft Sentinel url: scenario-onboard-azure-sentinel.md - text: Microsoft Defender for Cloud - url: /azure/defender-for-cloud/quickstart-onboard-machines?toc=%2Fazure%2Fazure-arc%2Fservers%2Ftoc.json&bc=%2Fazure%2Fazure-arc%2Fservers%2Fbreadcrumb%2Ftoc.json&pivots=azure-arc + url: ../../defender-for-cloud/quickstart-onboard-machines.md?bc=%2fazure%2fazure-arc%2fservers%2fbreadcrumb%2ftoc.json&pivots=azure-arc&toc=%2fazure%2fazure-arc%2fservers%2ftoc.json \ No newline at end of file diff --git a/articles/azure-arc/servers/onboard-group-policy.md b/articles/azure-arc/servers/onboard-group-policy.md index 07decb152416..7733e88751f9 100644 --- a/articles/azure-arc/servers/onboard-group-policy.md +++ b/articles/azure-arc/servers/onboard-group-policy.md @@ -1,7 +1,7 @@ --- title: Connect machines at scale using group policy description: In this article, you learn how to connect machines to Azure using Azure Arc-enabled servers using group policy. -ms.date: 04/29/2022 +ms.date: 05/25/2022 ms.topic: conceptual ms.custom: template-how-to --- @@ -35,15 +35,15 @@ Before you can run the script to connect your machines, you'll need to do the fo 1. Modify and save the following configuration file to the remote share as `ArcConfig.json`. Edit the file with your Azure subscription, resource group, and location details. Use the service principal details from step 1 for the last two fields: -``` +```json { - "tenant-id": "INSERT AZURE TENANTID", - "subscription-id": "INSERT AZURE SUBSCRIPTION ID", - "resource-group": "INSERT RESOURCE GROUP NAME", - "location": "INSERT REGION", - "service-principal-id": "INSERT SPN ID", - "service-principal-secret": "INSERT SPN Secret" - } + "tenant-id": "INSERT AZURE TENANTID", + "subscription-id": "INSERT AZURE SUBSCRIPTION ID", + "resource-group": "INSERT RESOURCE GROUP NAME", + "location": "INSERT REGION", + "service-principal-id": "INSERT SPN ID", + "service-principal-secret": "INSERT SPN Secret" + } ``` The group policy will project machines as Arc-enabled servers in the Azure subscription, resource group, and region specified in this configuration file. diff --git a/articles/azure-arc/servers/overview.md b/articles/azure-arc/servers/overview.md index e6d0164ff8f6..c35fa9593a77 100644 --- a/articles/azure-arc/servers/overview.md +++ b/articles/azure-arc/servers/overview.md @@ -36,12 +36,12 @@ When you connect your machine to Azure Arc-enabled servers, you can perform many * Perform post-deployment configuration and automation tasks using supported [Arc-enabled servers VM extensions](manage-vm-extensions.md) for your non-Azure Windows or Linux machine. * **Monitor**: * Monitor operating system performance and discover application components to monitor processes and dependencies with other resources using [VM insights](../../azure-monitor/vm/vminsights-overview.md). - * Collect other log data, such as performance data and events, from the operating system or workloads running on the machine with the [Log Analytics agent](../../azure-monitor/agents/agents-overview.md#log-analytics-agent). This data is stored in a [Log Analytics workspace](../../azure-monitor/logs/design-logs-deployment.md). 
+ * Collect other log data, such as performance data and events, from the operating system or workloads running on the machine with the [Log Analytics agent](../../azure-monitor/agents/agents-overview.md#log-analytics-agent). This data is stored in a [Log Analytics workspace](../../azure-monitor/logs/log-analytics-workspace-overview.md). > [!NOTE] > At this time, enabling Azure Automation Update Management directly from an Azure Arc-enabled server is not supported. See [Enable Update Management from your Automation account](../../automation/update-management/enable-from-automation-account.md) to understand requirements and [how to enable Update Management for non-Azure VMs](../../automation/update-management/enable-from-automation-account.md#enable-non-azure-vms). -Log data collected and stored in a Log Analytics workspace from the hybrid machine contains properties specific to the machine, such as a Resource ID, to support [resource-context](../../azure-monitor/logs/design-logs-deployment.md#access-mode) log access. +Log data collected and stored in a Log Analytics workspace from the hybrid machine contains properties specific to the machine, such as a Resource ID, to support [resource-context](../../azure-monitor/logs/manage-access.md#access-mode) log access. Watch this video to learn more about Azure monitoring, security, and update services across hybrid and multicloud environments. diff --git a/articles/azure-arc/servers/plan-at-scale-deployment.md b/articles/azure-arc/servers/plan-at-scale-deployment.md index 78197ee97749..7f78faf664fd 100644 --- a/articles/azure-arc/servers/plan-at-scale-deployment.md +++ b/articles/azure-arc/servers/plan-at-scale-deployment.md @@ -61,7 +61,7 @@ In this phase, system engineers or administrators enable the core features in th |-----|-------|---------| | [Create a resource group](../../azure-resource-manager/management/manage-resource-groups-portal.md#create-resource-groups) | A dedicated resource group to include only Azure Arc-enabled servers and centralize management and monitoring of these resources. | One hour | | Apply [Tags](../../azure-resource-manager/management/tag-resources.md) to help organize machines. | Evaluate and develop an IT-aligned [tagging strategy](/azure/cloud-adoption-framework/decision-guides/resource-tagging/) that can help reduce the complexity of managing your Azure Arc-enabled servers and simplify making management decisions. | One day | -| Design and deploy [Azure Monitor Logs](../../azure-monitor/logs/data-platform-logs.md) | Evaluate [design and deployment considerations](../../azure-monitor/logs/design-logs-deployment.md) to determine if your organization should use an existing or implement another Log Analytics workspace to store collected log data from hybrid servers and machines.1 | One day | +| Design and deploy [Azure Monitor Logs](../../azure-monitor/logs/data-platform-logs.md) | Evaluate [design and deployment considerations](../../azure-monitor/logs/workspace-design.md) to determine if your organization should use an existing or implement another Log Analytics workspace to store collected log data from hybrid servers and machines.1 | One day | | [Develop an Azure Policy](../../governance/policy/overview.md) governance plan | Determine how you will implement governance of hybrid servers and machines at the subscription or resource group scope with Azure Policy. 
| One day | | Configure [Role based access control](../../role-based-access-control/overview.md) (RBAC) | Develop an access plan to control who has access to manage Azure Arc-enabled servers and ability to view their data from other Azure services and solutions. | One day | | Identify machines with Log Analytics agent already installed | Run the following log query in [Log Analytics](../../azure-monitor/logs/log-analytics-overview.md) to support conversion of existing Log Analytics agent deployments to extension-managed agent:
    Heartbeat
    | summarize arg_max(TimeGenerated, OSType, ResourceId, ComputerEnvironment) by Computer
    | where ComputerEnvironment == "Non-Azure" and isempty(ResourceId)
    | project Computer, OSType | One hour | diff --git a/articles/azure-arc/servers/scenario-onboard-azure-sentinel.md b/articles/azure-arc/servers/scenario-onboard-azure-sentinel.md index 44386b93ba22..a8cb8149b1f3 100644 --- a/articles/azure-arc/servers/scenario-onboard-azure-sentinel.md +++ b/articles/azure-arc/servers/scenario-onboard-azure-sentinel.md @@ -13,7 +13,7 @@ This article is intended to help you onboard your Azure Arc-enabled server to [M Before you start, make sure that you've met the following requirements: -- A [Log Analytics workspace](../../azure-monitor/logs/data-platform-logs.md). For more information about Log Analytics workspaces, see [Designing your Azure Monitor Logs deployment](../../azure-monitor/logs/design-logs-deployment.md). +- A [Log Analytics workspace](../../azure-monitor/logs/data-platform-logs.md). For more information about Log Analytics workspaces, see [Designing your Azure Monitor Logs deployment](../../azure-monitor/logs/workspace-design.md). - Microsoft Sentinel [enabled in your subscription](../../sentinel/quickstart-onboard.md). diff --git a/articles/azure-cache-for-redis/TOC.yml b/articles/azure-cache-for-redis/TOC.yml index 928c25ee830d..106b2d0751a6 100644 --- a/articles/azure-cache-for-redis/TOC.yml +++ b/articles/azure-cache-for-redis/TOC.yml @@ -151,9 +151,15 @@ href: cache-how-to-manage-redis-cache-powershell.md - name: Deploy and Manage using Azure CLI href: cli-samples.md + - name: Create Redis cache - Bicep + displayName: ARM, Resource Manager, Template + href: cache-redis-cache-bicep-provision.md - name: Create Redis cache - ARM template displayName: Resource Manager href: cache-redis-cache-arm-provision.md + - name: Create Web App with Redis cache - Bicep + displayName: ARM, Resource Manager, Template + href: cache-web-app-bicep-with-redis-cache-provision.md - name: Create Web App with Redis cache - ARM template displayName: Resource Manager href: cache-web-app-arm-with-redis-cache-provision.md diff --git a/articles/azure-cache-for-redis/cache-how-to-geo-replication.md b/articles/azure-cache-for-redis/cache-how-to-geo-replication.md index 8015b9fda609..d0f40a6ee5e0 100644 --- a/articles/azure-cache-for-redis/cache-how-to-geo-replication.md +++ b/articles/azure-cache-for-redis/cache-how-to-geo-replication.md @@ -4,13 +4,13 @@ description: Learn how to replicate your Azure Cache for Redis Premium instances author: flang-msft ms.service: cache ms.topic: conceptual -ms.date: 02/08/2021 +ms.date: 05/24/2022 ms.author: franlanglois --- # Configure geo-replication for Premium Azure Cache for Redis instances -In this article, you'll learn how to configure a geo-replicated Azure Cache using the Azure portal. +In this article, you learn how to configure a geo-replicated Azure Cache using the Azure portal. Geo-replication links together two Premium Azure Cache for Redis instances and creates a data replication relationship. These cache instances are typically located in different Azure regions, though that isn't required. One instance acts as the primary, and the other as the secondary. The primary handles read and write requests and propagate changes to the secondary. This process continues until the link between the two instances is removed. @@ -154,6 +154,8 @@ Yes, geo-replication of caches in VNets is supported with caveats: - Geo-replication between caches in different VNets is also supported. 
- If the VNets are in the same region, you can connect them using [VNet peering](../virtual-network/virtual-network-peering-overview.md) or a [VPN Gateway VNet-to-VNet connection](../vpn-gateway/vpn-gateway-howto-vnet-vnet-resource-manager-portal.md). - If the VNets are in different regions, geo-replication using VNet peering is supported. A client VM in VNet 1 (region 1) isn't able to access the cache in VNet 2 (region 2) using its DNS name because of a constraint with Basic internal load balancers. For more information about VNet peering constraints, see [Virtual Network - Peering - Requirements and constraints](../virtual-network/virtual-network-manage-peering.md#requirements-and-constraints). We recommend using a VPN Gateway VNet-to-VNet connection. + +To configure your VNet effectively and avoid geo-replication issues, you must configure both the inbound and outbound ports correctly. For more information on avoiding the most common VNet misconfiguration issues, see [Geo-replication peer port requirements](cache-how-to-premium-vnet.md#geo-replication-peer-port-requirements). Using [this Azure template](https://azure.microsoft.com/resources/templates/redis-vnet-geo-replication/), you can quickly deploy two geo-replicated caches into a VNet connected with a VPN Gateway VNet-to-VNet connection. diff --git a/articles/azure-cache-for-redis/cache-redis-cache-bicep-provision.md b/articles/azure-cache-for-redis/cache-redis-cache-bicep-provision.md new file mode 100644 index 000000000000..541d6607228b --- /dev/null +++ b/articles/azure-cache-for-redis/cache-redis-cache-bicep-provision.md @@ -0,0 +1,99 @@ +--- +title: Deploy Azure Cache for Redis using Bicep +description: Learn how to use Bicep to deploy an Azure Cache for Redis resource. +author: schaffererin +ms.author: v-eschaffer +ms.service: cache +ms.topic: conceptual +ms.custom: subject-armqs, devx-track-azurepowershell +ms.date: 05/24/2022 +--- + +# Quickstart: Create an Azure Cache for Redis using Bicep + +Learn how to use Bicep to deploy a cache using Azure Cache for Redis. After you deploy the cache, use it with an existing storage account to keep diagnostic data. Learn how to define which resources are deployed and how to define parameters that are specified when the deployment is executed. You can use this Bicep file for your own deployments, or customize it to meet your requirements. + +[!INCLUDE [About Bicep](../../includes/resource-manager-quickstart-bicep-introduction.md)] + +## Prerequisites + +* **Azure subscription**: If you don't have an Azure subscription, create a [free account](https://azure.microsoft.com/free/) before you begin. +* **A storage account**: To create one, see [Create an Azure Storage account](../storage/common/storage-account-create.md?tabs=azure-portal). The storage account is used for diagnostic data. Create the storage account in a new resource group named **exampleRG**. + +## Review the Bicep file + +The Bicep file used in this quickstart is from [Azure Quickstart Templates](https://azure.microsoft.com/resources/templates/redis-cache/). + +:::code language="bicep" source="~/quickstart-templates/quickstarts/microsoft.cache/redis-cache/main.bicep"::: + +The following resources are defined in the Bicep file: + +* [Microsoft.Cache/Redis](/azure/templates/microsoft.cache/redis) +* [Microsoft.Insights/diagnosticsettings](/azure/templates/microsoft.insights/diagnosticsettings) + +## Deploy the Bicep file + +1. Save the Bicep file as **main.bicep** to your local computer. +1. 
Deploy the Bicep file using either Azure CLI or Azure PowerShell. + + # [CLI](#tab/CLI) + + ```azurecli + az deployment group create --resource-group exampleRG --template-file main.bicep --parameters existingDiagnosticsStorageAccountName= existingDiagnosticsStorageAccountResourceGroup= + ``` + + # [PowerShell](#tab/PowerShell) + + ```azurepowershell + New-AzResourceGroupDeployment -ResourceGroupName exampleRG -TemplateFile ./main.bicep -existingDiagnosticsStorageAccountName "" -existingDiagnosticsStorageAccountResourceGroup "" + ``` + + --- + + > [!NOTE] + > Replace **\** with the name of the storage account you created at the beginning of this quickstart. Replace **\** with the name of the resource group name in which your storage account is located. + + When the deployment finishes, you see a message indicating the deployment succeeded. + +## Review deployed resources + +Use the Azure portal, Azure CLI, or Azure PowerShell to list the deployed resources in the resource group. + +# [CLI](#tab/CLI) + +```azurecli-interactive +az resource list --resource-group exampleRG +``` + +# [PowerShell](#tab/PowerShell) + +```azurepowershell-interactive +Get-AzResource -ResourceGroupName exampleRG +``` + +--- + +## Clean up resources + +When no longer needed, delete the resource group, which deletes the resources in the resource group. + +# [CLI](#tab/CLI) + +```azurecli-interactive +az group delete --name exampleRG +``` + +# [PowerShell](#tab/PowerShell) + +```azurepowershell-interactive +Remove-AzResourceGroup -Name exampleRG +``` + +--- + +## Next steps + +In this tutorial, you learned how to use Bicep to deploy a cache using Azure Cache for Redis. To learn more about Azure Cache for Redis and Bicep, see the articles below: + +* Learn more about [Azure Cache for Redis](../azure-cache-for-redis/cache-overview.md). +* Learn more about [Bicep](../../articles/azure-resource-manager/bicep/overview.md). diff --git a/articles/azure-cache-for-redis/cache-web-app-bicep-with-redis-cache-provision.md b/articles/azure-cache-for-redis/cache-web-app-bicep-with-redis-cache-provision.md new file mode 100644 index 000000000000..1a8df36328c3 --- /dev/null +++ b/articles/azure-cache-for-redis/cache-web-app-bicep-with-redis-cache-provision.md @@ -0,0 +1,97 @@ +--- +title: Provision Web App that uses Azure Cache for Redis using Bicep +description: Use Bicep to deploy web app with Azure Cache for Redis. +author: schaffererin +ms.service: app-service +ms.topic: conceptual +ms.date: 05/24/2022 +ms.author: v-eschaffer +ms.custom: devx-track-azurepowershell + +--- +# Create a Web App plus Azure Cache for Redis using Bicep + +In this article, you use Bicep to deploy an Azure Web App that uses Azure Cache for Redis, as well as an App Service plan. + +[!INCLUDE [About Bicep](../../includes/resource-manager-quickstart-bicep-introduction.md)] + +You can use this Bicep file for your own deployments. The Bicep file provides unique names for the Azure Web App, the App Service plan, and the Azure Cache for Redis. If you'd like, you can customize the Bicep file after you save it to your local device to meet your requirements. + +For more information about creating Bicep files, see [Quickstart: Create Bicep files with Visual Studio Code](../azure-resource-manager/bicep/quickstart-create-bicep-use-visual-studio-code.md). To learn about Bicep syntax, see [Understand the structure and syntax of Bicep files](../azure-resource-manager/bicep/file.md). 
+ +## Review the Bicep file + +The Bicep file used in this quickstart is from [Azure Quickstart Templates](https://github.com/Azure/azure-quickstart-templates/blob/master/quickstarts/microsoft.web/web-app-with-redis-cache/). + +:::code language="bicep" source="~/quickstart-templates/quickstarts/microsoft.web/web-app-with-redis-cache/main.bicep"::: + +With this Bicep file, you deploy: + +* [**Microsoft.Cache/Redis**](/azure/templates/microsoft.cache/redis) +* [**Microsoft.Web/sites**](/azure/templates/microsoft.web/sites) +* [**Microsoft.Web/serverfarms**](/azure/templates/microsoft.web/serverfarms) + +## Deploy the Bicep file + +1. Save the Bicep file as **main.bicep** to your local computer. +1. Deploy the Bicep file using either Azure CLI or Azure PowerShell. + + # [CLI](#tab/CLI) + + ```azurecli + az group create --name exampleRG --location eastus + az deployment group create --resource-group exampleRG --template-file main.bicep + ``` + + # [PowerShell](#tab/PowerShell) + + ```azurepowershell + New-AzResourceGroup -Name exampleRG -Location eastus + New-AzResourceGroupDeployment -ResourceGroupName exampleRG -TemplateFile ./main.bicep + ``` + + --- + + When the deployment finishes, you should see a message indicating the deployment succeeded. + +## Review deployed resources + +Use the Azure portal, Azure CLI, or Azure PowerShell to list the deployed resources in the resource group. + +# [CLI](#tab/CLI) + +```azurecli-interactive +az resource list --resource-group exampleRG +``` + +# [PowerShell](#tab/PowerShell) + +```azurepowershell-interactive +Get-AzResource -ResourceGroupName exampleRG +``` + +--- + +## Clean up resources + +When no longer needed, use the Azure portal, Azure CLI, or Azure PowerShell to delete the resource group and its resources. + +# [CLI](#tab/CLI) + +```azurecli-interactive +az group delete --name exampleRG +``` + +# [PowerShell](#tab/PowerShell) + +```azurepowershell-interactive +Remove-AzResourceGroup -Name exampleRG +``` + +--- + +## Next steps + +To learn more about Bicep, continue to the following article: + +* [Bicep overview](../azure-resource-manager/bicep/overview.md) diff --git a/articles/azure-functions/start-stop-vms/deploy.md b/articles/azure-functions/start-stop-vms/deploy.md index ee16d525c44b..324b42cdb2b9 100644 --- a/articles/azure-functions/start-stop-vms/deploy.md +++ b/articles/azure-functions/start-stop-vms/deploy.md @@ -31,8 +31,27 @@ To simplify management and removal, we recommend you deploy Start/Stop VMs v2 (p > The naming format for the function app and storage account has changed. To guarantee global uniqueness, a random and unique string is now appended to the names of these resource. 1. Open your browser and navigate to the Start/Stop VMs v2 [GitHub organization](https://github.com/microsoft/startstopv2-deployments/blob/main/README.md). -1. Select the deployment option based on the Azure cloud environment your Azure VMs are created in. This will open the custom Azure Resource Manager deployment page in the Azure portal. +1. Select the deployment option based on the Azure cloud environment your Azure VMs are created in. 1. If prompted, sign in to the [Azure portal](https://portal.azure.com). +1. Choose the appropriate **Plan** from the drop-down box. 
When choosing a Zone Redundant plan (**Start/StopV2-AZ**), you must create your deployment in one of the following regions: + + Australia East + + Brazil South + + Canada Central + + Central US + + East US + + East US 2 + + France Central + + Germany West Central + + Japan East + + North Europe + + Southeast Asia + + UK South + + West Europe + + West US 2 + + West US 3 + +1. Select **Create**, which opens the custom Azure Resource Manager deployment page in the Azure portal. + 1. Enter the following values: |Name |Value | diff --git a/articles/azure-maps/authentication-best-practices.md b/articles/azure-maps/authentication-best-practices.md index bfc8d8c450b0..64e14e33f56b 100644 --- a/articles/azure-maps/authentication-best-practices.md +++ b/articles/azure-maps/authentication-best-practices.md @@ -14,7 +14,7 @@ services: azure-maps The single most important part of your application is its security. No matter how good the user experience might be, if your application isn't secure a hacker can ruin it. -The following are some tips to keep your Azure Maps application secure. When using Azure, be sure to familiarize yourself with the security tools available to you. For more information, See the [introduction to Azure security](/azure/security/fundamentals/overview). +The following are some tips to keep your Azure Maps application secure. When using Azure, be sure to familiarize yourself with the security tools available to you. For more information, See the [introduction to Azure security](../security/fundamentals/overview.md). ## Understanding security threats @@ -32,17 +32,17 @@ When creating a publicly facing client application with Azure Maps using any of Subscription key-based authentication (Shared Key) can be used in either client side applications or web services, however it is the least secure approach to securing your application or web service. This is because the key grants access to all Azure Maps REST API that are available in the SKU (Pricing Tier) selected when creating the Azure Maps account and the key can be easily obtained from an HTTP request. If you do use subscription keys, be sure to [rotate them regularly](how-to-manage-authentication.md#manage-and-rotate-shared-keys) and keep in mind that Shared Key doesn't allow for configurable lifetime, it must be done manually. You should also consider using [Shared Key authentication with Azure Key Vault](how-to-secure-daemon-app.md#scenario-shared-key-authentication-with-azure-key-vault), which enables you to securely store your secret in Azure. -If using [Azure Active Directory (Azure AD) authentication](/azure/active-directory/fundamentals/active-directory-whatis) or [Shared Access Signature (SAS) Token authentication](azure-maps-authentication.md#shared-access-signature-token-authentication) (preview), access to Azure Maps REST APIs is authorized using [role-based access control (RBAC)](azure-maps-authentication.md#authorization-with-role-based-access-control). RBAC enables you to control what access is given to the issued tokens. You should consider how long access should be granted for the tokens. Unlike Shared Key authentication, the lifetime of these tokens is configurable. 
+If using [Azure Active Directory (Azure AD) authentication](../active-directory/fundamentals/active-directory-whatis.md) or [Shared Access Signature (SAS) Token authentication](azure-maps-authentication.md#shared-access-signature-token-authentication) (preview), access to Azure Maps REST APIs is authorized using [role-based access control (RBAC)](azure-maps-authentication.md#authorization-with-role-based-access-control). RBAC enables you to control what access is given to the issued tokens. You should consider how long access should be granted for the tokens. Unlike Shared Key authentication, the lifetime of these tokens is configurable. > [!TIP] > > For more information on configuring token lifetimes see: -> - [Configurable token lifetimes in the Microsoft identity platform (preview)](/azure/active-directory/develop/active-directory-configurable-token-lifetimes) +> - [Configurable token lifetimes in the Microsoft identity platform (preview)](../active-directory/develop/active-directory-configurable-token-lifetimes.md) > - [Create SAS tokens](azure-maps-authentication.md#create-sas-tokens) ### Public client and confidential client applications -There are different security concerns between public and confidential client applications. See [Public client and confidential client applications](/azure/active-directory/develop/msal-client-applications) in the Microsoft identity platform documentation for more information about what is considered a *public* versus *confidential* client application. +There are different security concerns between public and confidential client applications. See [Public client and confidential client applications](../active-directory/develop/msal-client-applications.md) in the Microsoft identity platform documentation for more information about what is considered a *public* versus *confidential* client application. ### Public client applications @@ -53,7 +53,7 @@ For apps that run on devices or desktop computers or in a web browser, you shoul ### Confidential client applications -For apps that run on servers (such as web services and service/daemon apps), if you prefer to avoid the overhead and complexity of managing secrets, consider [Managed Identities](/azure/active-directory/managed-identities-azure-resources/overview). Managed identities can provide an identity for your web service to use when connecting to Azure Maps using Azure Active Directory (Azure AD) authentication. In this case, your web service will use that identity to obtain the required Azure AD tokens. You should use Azure RBAC to configure what access the web service is given, using the [Least privileged roles](/azure/active-directory/roles/delegate-by-task) possible. +For apps that run on servers (such as web services and service/daemon apps), if you prefer to avoid the overhead and complexity of managing secrets, consider [Managed Identities](../active-directory/managed-identities-azure-resources/overview.md). Managed identities can provide an identity for your web service to use when connecting to Azure Maps using Azure Active Directory (Azure AD) authentication. In this case, your web service will use that identity to obtain the required Azure AD tokens. You should use Azure RBAC to configure what access the web service is given, using the [Least privileged roles](../active-directory/roles/delegate-by-task.md) possible. 
## Next steps @@ -64,4 +64,4 @@ For apps that run on servers (such as web services and service/daemon apps), if > [Manage authentication in Azure Maps](how-to-manage-authentication.md) > [!div class="nextstepaction"] -> [Tutorial: Add app authentication to your web app running on Azure App Service](../app-service/scenario-secure-app-authentication-app-service.md) +> [Tutorial: Add app authentication to your web app running on Azure App Service](../app-service/scenario-secure-app-authentication-app-service.md) \ No newline at end of file diff --git a/articles/azure-monitor/agents/agents-overview.md b/articles/azure-monitor/agents/agents-overview.md index 8ac5b6178300..13c0c124ffb2 100644 --- a/articles/azure-monitor/agents/agents-overview.md +++ b/articles/azure-monitor/agents/agents-overview.md @@ -6,7 +6,7 @@ services: azure-monitor ms.topic: conceptual author: bwren ms.author: bwren -ms.date: 05/11/2022 +ms.date: 05/24/2022 --- # Overview of Azure Monitor agents @@ -171,6 +171,7 @@ The following tables list the operating systems that are supported by the Azure | Operating system | Azure Monitor agent 1 | Log Analytics agent 1 | Dependency agent | Diagnostics extension 2| |:---|:---:|:---:|:---:|:---: +| AlmaLinux | X | | | | | Amazon Linux 2017.09 | | X | | | | Amazon Linux 2 | | X | | | | CentOS Linux 8 | X 3 | X | X | | @@ -191,12 +192,14 @@ The following tables list the operating systems that are supported by the Azure | Red Hat Enterprise Linux Server 7 | X | X | X | X | | Red Hat Enterprise Linux Server 6 | | X | X | | | Red Hat Enterprise Linux Server 6.7+ | | X | X | X | +| Rocky Linux | X | | | | | SUSE Linux Enterprise Server 15.2 | X 3 | | | | | SUSE Linux Enterprise Server 15.1 | X 3 | X | | | | SUSE Linux Enterprise Server 15 SP1 | X | X | X | | | SUSE Linux Enterprise Server 15 | X | X | X | | | SUSE Linux Enterprise Server 12 SP5 | X | X | X | X | | SUSE Linux Enterprise Server 12 | X | X | X | X | +| Ubuntu 22.04 LTS | X | | | | | Ubuntu 20.04 LTS | X | X | X | X | | Ubuntu 18.04 LTS | X | X | X | X | | Ubuntu 16.04 LTS | X | X | X | X | diff --git a/articles/azure-monitor/agents/azure-monitor-agent-extension-versions.md b/articles/azure-monitor/agents/azure-monitor-agent-extension-versions.md index 8a299fb0a29d..3d5f33ec934c 100644 --- a/articles/azure-monitor/agents/azure-monitor-agent-extension-versions.md +++ b/articles/azure-monitor/agents/azure-monitor-agent-extension-versions.md @@ -4,7 +4,7 @@ description: This article describes the version details for the Azure Monitor ag ms.topic: conceptual author: shseth ms.author: shseth -ms.date: 5/19/2022 +ms.date: 5/25/2022 ms.custom: references_region --- @@ -18,7 +18,7 @@ We strongly recommended to update to the latest version at all times, or opt in ## Version details | Release Date | Release notes | Windows | Linux | |:---|:---|:---|:---| -| April 2022 |
    • Private IP information added in Log Analytics Heartbeat table for Windows
    • Fixed bugs in Windows IIS log collection (preview)
      • Updated IIS site column name to match backend KQL transform
      • Added delay to IIS upload task to account for IIS buffering
    | 1.4.1.0Hotfix | Coming soon | +| April 2022 |
    • Private IP information added in Log Analytics Heartbeat table for Windows and Linux
    • Fixed bugs in Windows IIS log collection (preview)
      • Updated IIS site column name to match backend KQL transform
      • Added delay to IIS upload task to account for IIS buffering
    • Fixed Linux CEF syslog forwarding for Sentinel
    • Removed 'error' message for Azure MSI token retrieval failure on Arc to show as 'Info' instead
    • Support added for Ubuntu 22.04, AlmaLinux and RockyLinux distros
    | 1.4.1.0Hotfix | 1.19.3 | | March 2022 |
    • Fixed timestamp and XML format bugs in Windows Event logs
    • Full Windows OS information in Log Analytics Heartbeat table
    • Fixed Linux performance counters to collect instance values instead of 'total' only
    | 1.3.0.0 | 1.17.5.0 | | February 2022 |
    • Bugfixes for the AMA Client installer (private preview)
    • Versioning fix to reflect appropriate Windows major/minor/hotfix versions
    • Internal test improvement on Linux
    | 1.2.0.0 | 1.15.3 | | January 2022 |
    • Syslog RFC compliance for Linux
    • Fixed issue for Linux perf counters not flowing on restart
    • Fixed installation failure on Windows Server 2008 R2 SP1
    | 1.1.5.1Hotfix | 1.15.2.0Hotfix | diff --git a/articles/azure-monitor/agents/azure-monitor-agent-overview.md b/articles/azure-monitor/agents/azure-monitor-agent-overview.md index 1359c61ad0f3..0db75380d3c7 100644 --- a/articles/azure-monitor/agents/azure-monitor-agent-overview.md +++ b/articles/azure-monitor/agents/azure-monitor-agent-overview.md @@ -70,7 +70,7 @@ The Azure Monitor agent can coexist (run side by side on the same machine) with | Resource type | Installation method | Additional information | |:---|:---|:---| | Virtual machines, scale sets | [Virtual machine extension](./azure-monitor-agent-manage.md#virtual-machine-extension-details) | Installs the agent using Azure extension framework | -| On-premise servers (Arc-enabled servers) | [Virtual machine extension](./azure-monitor-agent-manage.md#virtual-machine-extension-details) (after installing [Arc agent](/azure/azure-arc/servers/deployment-options)) | Installs the agent using Azure extension framework, provided for on-premise by first installing [Arc agent](/azure/azure-arc/servers/deployment-options) | +| On-premise servers (Arc-enabled servers) | [Virtual machine extension](./azure-monitor-agent-manage.md#virtual-machine-extension-details) (after installing [Arc agent](../../azure-arc/servers/deployment-options.md)) | Installs the agent using Azure extension framework, provided for on-premise by first installing [Arc agent](../../azure-arc/servers/deployment-options.md) | | Windows 10, 11 desktops, workstations | [Client installer (preview)](./azure-monitor-agent-windows-client.md) | Installs the agent using a Windows MSI installer | | Windows 10, 11 laptops | [Client installer (preview)](./azure-monitor-agent-windows-client.md) | Installs the agent using a Windows MSI installer. The installs works on laptops but the agent is **not optimized yet** for battery, network consumption | @@ -211,4 +211,4 @@ To configure the agent to use private links for network communications with Azur ## Next steps - [Install the Azure Monitor agent](azure-monitor-agent-manage.md) on Windows and Linux virtual machines. -- [Create a data collection rule](data-collection-rule-azure-monitor-agent.md) to collect data from the agent and send it to Azure Monitor. +- [Create a data collection rule](data-collection-rule-azure-monitor-agent.md) to collect data from the agent and send it to Azure Monitor. \ No newline at end of file diff --git a/articles/azure-monitor/agents/data-collection-text-log.md b/articles/azure-monitor/agents/data-collection-text-log.md index 4b05ac45f621..4b0b88ed86cc 100644 --- a/articles/azure-monitor/agents/data-collection-text-log.md +++ b/articles/azure-monitor/agents/data-collection-text-log.md @@ -15,7 +15,7 @@ This article describes how to configure the collection of file-based text logs, ## Prerequisites To complete this procedure, you need the following: -- Log Analytics workspace where you have at least [contributor rights](../logs/manage-access.md#manage-access-using-azure-permissions) . +- Log Analytics workspace where you have at least [contributor rights](../logs/manage-access.md#azure-rbac) . - [Permissions to create Data Collection Rule objects](../essentials/data-collection-rule-overview.md#permissions) in the workspace. - An agent with supported log file as described in the next section. 
@@ -356,7 +356,7 @@ The [data collection rule (DCR)](../essentials/data-collection-rule-overview.md) "Microsoft-W3CIISLog" ], "logDirectories": [ - "C:\\inetpub\\logs\\LogFiles\\*.log" + "C:\\inetpub\\logs\\LogFiles\\" ], "name": "myIisLogsDataSource" } diff --git a/articles/azure-monitor/agents/om-agents.md b/articles/azure-monitor/agents/om-agents.md index 8c8e584fff45..36ddf2308459 100644 --- a/articles/azure-monitor/agents/om-agents.md +++ b/articles/azure-monitor/agents/om-agents.md @@ -31,8 +31,8 @@ Before starting, review the following requirements. * Azure Monitor only supports System Center Operations Manager 2016 or later, Operations Manager 2012 SP1 UR6 or later, and Operations Manager 2012 R2 UR2 or later. Proxy support was added in Operations Manager 2012 SP1 UR7 and Operations Manager 2012 R2 UR3. * Integrating System Center Operations Manager 2016 with US Government cloud requires an updated Advisor management pack included with Update Rollup 2 or later. System Center Operations Manager 2012 R2 requires an updated Advisor management pack included with Update Rollup 3 or later. * All Operations Manager agents must meet minimum support requirements. Ensure that agents are at the minimum update, otherwise Windows agent communication may fail and generate errors in the Operations Manager event log. -* A Log Analytics workspace. For further information, review [Log Analytics workspace overview](../logs/design-logs-deployment.md). -* You authenticate to Azure with an account that is a member of the [Log Analytics Contributor role](../logs/manage-access.md#manage-access-using-azure-permissions). +* A Log Analytics workspace. For further information, review [Log Analytics workspace overview](../logs/workspace-design.md). +* You authenticate to Azure with an account that is a member of the [Log Analytics Contributor role](../logs/manage-access.md#azure-rbac). * Supported Regions - Only the following Azure regions are supported by System Center Operations Manager to connect to a Log Analytics workspace: - West Central US diff --git a/articles/azure-monitor/alerts/activity-log-alerts.md b/articles/azure-monitor/alerts/activity-log-alerts.md deleted file mode 100644 index f098fe502886..000000000000 --- a/articles/azure-monitor/alerts/activity-log-alerts.md +++ /dev/null @@ -1,84 +0,0 @@ ---- -title: Activity log alerts in Azure Monitor -description: Be notified via SMS, webhook, SMS, email and more, when certain events occur in the activity log. -ms.topic: conceptual -ms.date: 04/04/2022 - ---- - -# Alerts on activity log - -## Overview - -Activity log alerts allow you to be notified on events and operations that are logged in [Azure Activity Log](../essentials/activity-log.md). An alert is fired when a new [activity log event](../essentials/activity-log-schema.md) occurs that matches the conditions specified in the alert rule. - -Activity log alert rules are Azure resources, so they can be created by using an Azure Resource Manager template. They also can be created, updated, or deleted in the Azure portal. This article introduces the concepts behind activity log alerts. For more information on creating or usage of activity log alert rules, see [Create and manage activity log alerts](./alerts-activity-log.md). 
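For illustration, a minimal Resource Manager template resource for an activity log alert rule might look like the following sketch. The subscription scope, operation name, and action group ID are placeholders, and the `apiVersion` is an assumption that may differ from the version you deploy with:

```json
{
  "type": "Microsoft.Insights/activityLogAlerts",
  "apiVersion": "2020-10-01",
  "name": "delete-vm-activity-alert",
  "location": "Global",
  "properties": {
    "enabled": true,
    "scopes": [ "/subscriptions/<subscription-id>" ],
    "condition": {
      "allOf": [
        { "field": "category", "equals": "Administrative" },
        { "field": "operationName", "equals": "Microsoft.Compute/virtualMachines/delete" }
      ]
    },
    "actions": {
      "actionGroups": [
        { "actionGroupId": "/subscriptions/<subscription-id>/resourceGroups/<resource-group>/providers/Microsoft.Insights/actionGroups/<action-group>" }
      ]
    }
  }
}
```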
- -## Alerting on activity log event categories - -You can create activity log alert rules to receive notifications on one of the following activity log event categories: - -| Event Category | Category Description | Example | -|----------------|-------------|---------| -| Administrative | ARM operation (e.g. create, update, delete, or action) was performed on resources in your subscription, resource group, or on a specific Azure resource.| A virtual machine in your resource group is deleted | -| Service health | Service incidents (e.g. an outage or a maintenance event) occurred that may impact services in your subscription on a specific region.| An outage impacting VMs in your subscription in East US. | -| Resource health | The health of a specific resource is degraded, or the resource becomes unavailable. | A VM in your subscription transitions to a degraded or unavailable state. | -| Autoscale | An Azure Autoscale operation has occurred, resulting in success or failure | An autoscale action on a virtual machine scale set in your subscription failed. | -| Recommendation | A new Azure Advisor recommendation is available for your subscription | A high-impact recommendation for your subscription was received. | -| Security | Events detected by Microsoft Defender for Cloud | A suspicious double extension file executed was detected in your subscription | -| Policy | Operations performed by Azure Policy | Policy Deny event occurred in your subscription. | - -> [!NOTE] -> Alert rules **cannot** be created for events in Alert category of activity log. - - -## Configuring activity log alert rules - -You can configure an activity log alert rule based on any top-level property in the JSON object for an activity log event. For more information, see [Categories in the Activity Log](../essentials/activity-log.md#view-the-activity-log). - -An alternative simple way for creating conditions for activity log alert rules is to explore or filter events via [Activity log in Azure portal](../essentials/activity-log.md#view-the-activity-log). In Azure Monitor - Activity log, one can filter and locate a required event and then create an alert rule to notify on similar events by using the **New alert rule** button. - -> [!NOTE] -> An activity log alert rule monitors only for events in the subscription in which the alert rule is created. - -Activity log events have a few common properties which can be used to define an activity log alert rule condition: - -- **Category**: Administrative, Service Health, Resource Health, Autoscale, Security, Policy, or Recommendation. -- **Scope**: The individual resource or set of resource(s) for which the alert on activity log is defined. Scope for an activity log alert can be defined at various levels: - - Resource Level: For example, for a specific virtual machine - - Resource Group Level: For example, all virtual machines in a specific resource group - - Subscription Level: For example, all virtual machines in a subscription (or) all resources in a subscription -- **Resource group**: By default, the alert rule is saved in the same resource group as that of the target defined in Scope. The user can also define the Resource Group where the alert rule should be stored. -- **Resource type**: Resource Manager defined namespace for the target of the alert rule. -- **Operation name**: The [Azure resource provider operation](../../role-based-access-control/resource-provider-operations.md) name utilized for Azure role-based access control. 
Operations not registered with Azure Resource Manager cannot be used in an activity log alert rule. -- **Level**: The severity level of the event (Informational, Warning, Error, or Critical). -- **Status**: The status of the event, typically Started, Failed, or Succeeded. -- **Event initiated by**: Also known as the "caller." The email address or Azure Active Directory identifier of the user (or application) who performed the operation. - -In addition to these comment properties, different activity log events have category-specific properties that can be used to configure an alert rule for events of each category. For example, when creating a service health alert rule you can configure a condition on the impacted region or service that appear in the event. - -## Using action groups - -When an activity log alert is fired, it uses an action group to trigger actions or send notifications. An action group is a reusable set of notification receivers, such as email addresses, webhook URLs, or SMS phone numbers. The receivers can be referenced from multiple alerts rules to centralize and group your notification channels. When you define your activity log alert rule, you have two options. You can: - -* Use an existing action group in your activity log alert rule. -* Create a new action group. - -To learn more about action groups, see [Create and manage action groups in the Azure portal](./action-groups.md). - -## Activity log alert rules limit -You can create up to 100 active activity log alert rules per subscription (including rules for all activity log event categories, such as resource health or service health). This limit can't be increased. -If you are reaching near this limit, there are several guidelines you can follow to optimize the use of activity log alerts rules, so that you can cover more resources and events with the same number of rules: -* A single activity log alert rule can be configured to cover the scope of a single resource, a resource group, or an entire subscription. To reduce the number of rules you're using, consider to replace multiple rules covering a narrow scope with a single rule covering a broad scope. For example, if you have multiple VMs in a subscription, and you want an alert to be triggered whenever one of them is restarted, you can use a single activity log alert rule to cover all the VMs in your subscription. The alert will be triggered whenever any VM in the subscription is restarted. -* A single service health alert rule can cover all the services and Azure regions used by your subscription. If you're using multiple service health alert rules per subscription, you can replace them with a single rule (or with a small number of rules, if you prefer). -* A single resource health alert rule can cover multiple resource types and resources in your subscription. If you're using multiple resource health alert rules per subscription, you can replace them with a smaller number of rules (or even a single rule) that covers multiple resource types. - - -## Next steps - -- Get an [overview of alerts](./alerts-overview.md). -- Learn about [create and modify activity log alerts](alerts-activity-log.md). -- Review the [activity log alert webhook schema](../alerts/activity-log-alerts-webhook.md). -- Learn more about [service health alerts](../../service-health/service-notifications.md). -- Learn more about [Resource health alerts](../../service-health/resource-health-alert-monitor-guide.md). -- Learn more about [Recommendation alerts](../../advisor/advisor-alerts-portal.md). 
diff --git a/articles/azure-monitor/alerts/alerts-common-schema-definitions.md b/articles/azure-monitor/alerts/alerts-common-schema-definitions.md index 9eb048e9bcc1..859ce511e33c 100644 --- a/articles/azure-monitor/alerts/alerts-common-schema-definitions.md +++ b/articles/azure-monitor/alerts/alerts-common-schema-definitions.md @@ -11,7 +11,7 @@ ms.date: 07/20/2021 This article describes the [common alert schema definitions](./alerts-common-schema.md) for Azure Monitor, including those for webhooks, Azure Logic Apps, Azure Functions, and Azure Automation runbooks. Any alert instance describes the resource that was affected and the cause of the alert. These instances are described in the common schema in the following sections: -* **Essentials**: A set of standardized fields, common across all alert types, which describe what resource the alert is on, along with additional common alert metadata (for example, severity or description). Definitions of severity can be found in the [alerts overview](alerts-overview.md#overview). +* **Essentials**: A set of standardized fields, common across all alert types, which describe what resource the alert is on, along with additional common alert metadata (for example, severity or description). * **Alert context**: A set of fields that describes the cause of the alert, with fields that vary based on the alert type. For example, a metric alert includes fields like the metric name and metric value in the alert context, whereas an activity log alert has information about the event that generated the alert. **Sample alert payload** diff --git a/articles/azure-monitor/alerts/alerts-common-schema-test-action-definitions.md b/articles/azure-monitor/alerts/alerts-common-schema-test-action-definitions.md index dda4f50b28c7..63d2f509898d 100644 --- a/articles/azure-monitor/alerts/alerts-common-schema-test-action-definitions.md +++ b/articles/azure-monitor/alerts/alerts-common-schema-test-action-definitions.md @@ -11,7 +11,7 @@ ms.date: 01/14/2022 This article describes the [common alert schema definitions](./alerts-common-schema.md) for Azure Monitor, including those for webhooks, Azure Logic Apps, Azure Functions, and Azure Automation runbooks. Any alert instance describes the resource that was affected and the cause of the alert. These instances are described in the common schema in the following sections: -* **Essentials**: A set of standardized fields, common across all alert types, which describe what resource the alert is on, along with additional common alert metadata (for example, severity or description). Definitions of severity can be found in the [alerts overview](alerts-overview.md#overview). +* **Essentials**: A set of standardized fields, common across all alert types, which describe what resource the alert is on, along with additional common alert metadata (for example, severity or description). * **Alert context**: A set of fields that describes the cause of the alert, with fields that vary based on the alert type. For example, a metric alert includes fields like the metric name and metric value in the alert context, whereas an activity log alert has information about the event that generated the alert. 
**Sample alert payload** diff --git a/articles/azure-monitor/alerts/alerts-dynamic-thresholds.md b/articles/azure-monitor/alerts/alerts-dynamic-thresholds.md index 5e06e97c88e3..e10db5b7626d 100644 --- a/articles/azure-monitor/alerts/alerts-dynamic-thresholds.md +++ b/articles/azure-monitor/alerts/alerts-dynamic-thresholds.md @@ -73,7 +73,7 @@ To trigger an alert when there was a violation from a Dynamic Thresholds in 20 m ## How do you find out why a Dynamic Thresholds alert was triggered? -You can explore triggered alert instances in the alerts view either by clicking on the link in the email or text message, or browser to see the alerts view in the Azure portal. [Learn more about the alerts view](./alerts-overview.md#alerts-experience). +You can explore triggered alert instances by clicking on the link in the email or text message, or browse to see the alerts in the Azure portal. [Learn more about the alerts view](./alerts-page.md). The alert view displays: diff --git a/articles/azure-monitor/alerts/alerts-log.md b/articles/azure-monitor/alerts/alerts-log.md index 07fef8a10a4c..933cc24094bc 100644 --- a/articles/azure-monitor/alerts/alerts-log.md +++ b/articles/azure-monitor/alerts/alerts-log.md @@ -1,38 +1,25 @@ --- -title: Create, view, and manage log alert rules Using Azure Monitor | Microsoft Docs -description: Use Azure Monitor to create, view, and manage log alert rules +title: Create Azure Monitor log alert rules and manage alert instances | Microsoft Docs +description: Create Azure Monitor log alert rules and manage your alert instances. author: AbbyMSFT ms.author: abbyweisberg ms.topic: conceptual -ms.date: 2/23/2022 +ms.date: 05/23/2022 ms.custom: devx-track-azurepowershell, devx-track-azurecli +ms.reviewer: yanivlavi --- -# Create, view, and manage log alerts using Azure Monitor +# Create Azure Monitor log alert rules and manage alert instances -This article shows you how to create and manage log alerts. Azure Monitor log alerts allow users to use a [Log Analytics](../logs/log-analytics-tutorial.md) query to evaluate resource logs at a set frequency and fire an alert based on the results. Rules can trigger one or more actions using [Action Groups](./action-groups.md). [Learn more about functionality and terminology of log alerts](./alerts-unified-log.md). +This article shows you how to create log alert rules and manage your alert instances. Azure Monitor log alerts allow users to use a [Log Analytics](../logs/log-analytics-tutorial.md) query to evaluate resource logs at a set frequency and fire an alert based on the results. Rules can trigger one or more actions using [alert processing rules](alerts-action-rules.md) and [action groups](./action-groups.md). Learn the concepts behind log alerts [here](alerts-types.md#log-alerts). - Alert rules are defined by three components: +When an alert is triggered by an alert rule, - Target: A specific Azure resource to monitor. - Criteria: Logic to evaluate. If met, the alert fires. - Action: Notifications or automation - email, SMS, webhook, and so on. You can also [create log alert rules using Azure Resource Manager templates](../alerts/alerts-log-create-templates.md). ## Create a new log alert rule in the Azure portal -> [!NOTE] -> This article describes creating alert rules using the new alert rule wizard. -> The new alert rule experience is a little different than the old experience. Please note these changes: -> - Previously, search results were included in the payloads of the triggered alert and its associated notifications. 
This was a limited and error prone solution. To get detailed context information about the alert so that you can decide on the appropriate action : -> - The recommended best practice it to use [Dimensions](alerts-unified-log.md#split-by-alert-dimensions). Dimensions provide the column value that fired the alert, giving you context for why the alert fired and how to fix the issue. -> - When you need to investigate in the logs, use the link in the alert to the search results in Logs. -> - If you need the raw search results or for any other advanced customizations, use Logic Apps. -> - The new alert rule wizard does not support customization of the JSON payload. -> - Use custom properties in the [new API](/rest/api/monitor/scheduledqueryrule-2021-08-01/scheduled-query-rules/create-or-update#actions) to add static parameters and associated values to the webhook actions triggered by the alert. -> - For more advanced customizations, use Logic Apps. -> - The new alert rule wizard does not support customization of the email subject. -> - Customers often use the custom email subject to indicate the resource on which the alert fired, instead of using the Log Analytics workspace. Use the [new API](alerts-unified-log.md#split-by-alert-dimensions) to trigger an alert of the desired resource using the resource id column. -> - For more advanced customizations, use Logic Apps. - -1. In the [portal](https://portal.azure.com/), select the relevant resource. We recommend monitoring at scale by using a subscription or resource group for the alert rule. +1. In the [portal](https://portal.azure.com/), select the relevant resource. We recommend monitoring at scale by using a subscription or resource group. 1. In the Resource menu, select **Logs**. 1. Write a query that will find the log events for which you want to create an alert. You can use the [alert query examples article](../logs/queries.md) to understand what you can discover or [get started on writing your own query](../logs/log-analytics-tutorial.md). Also, [learn how to create optimized alert queries](alerts-log-query.md). 1. From the top command bar, Select **+ New Alert rule**. @@ -40,31 +27,66 @@ You can also [create log alert rules using Azure Resource Manager templates](../ :::image type="content" source="media/alerts-log/alerts-create-new-alert-rule.png" alt-text="Create new alert rule." lightbox="media/alerts-log/alerts-create-new-alert-rule-expanded.png"::: 1. The **Condition** tab opens, populated with your log query. + + By default, the rule counts the number of results in the last 5 minutes. + + If the system detects summarized query results, the rule is automatically updated with that information. :::image type="content" source="media/alerts-log/alerts-logs-conditions-tab.png" alt-text="Conditions Tab."::: -1. In the **Measurement** section, select values for the [**Measure**](./alerts-unified-log.md#measure), [**Aggregation type**](./alerts-unified-log.md#aggregation-type), and [**Aggregation granularity**](./alerts-unified-log.md#aggregation-granularity) fields. - - By default, the rule counts the number of results in the last 5 minutes. - - If the system detects summarized query results, the rule is automatically updated to capture that. - +1. In the **Measurement** section, select values for these fields: + + |Field |Description | + |---------|---------| + |Measure|Log alerts can measure two different things, which can be used for different monitoring scenarios:
    **Table rows**: The number of rows returned can be used to work with events such as Windows event logs, syslog, and application exceptions.
    **Calculation of a numeric column**: Calculations based on any numeric column can be used to include any number of resources. For example, CPU percentage. | + |Aggregation type| The calculation performed on multiple records to aggregate them to one numeric value using the aggregation granularity. For example: Total, Average, Minimum, or Maximum. | + |Aggregation granularity| The interval for aggregating multiple records to one numeric value.| + :::image type="content" source="media/alerts-log/alerts-log-measurements.png" alt-text="Measurements."::: -1. (Optional) In the **Split by dimensions** section, select [alert splitting by dimensions](./alerts-unified-log.md#split-by-alert-dimensions): - - If detected, The **Resource ID column** is selected automatically and changes the context of the fired alert to the record's resource. - - Clear the **Resource ID column** to fire alerts on multiple resources in subscriptions or resource groups. For example, you can create a query that checks if 80% of the resource group's virtual machines are experiencing high CPU usage. - - You can use the dimensions table to select up to six more splittings for any number or text columns types. - - Alerts are fired individually for each unique splitting combination. The alert payload includes the combination that triggered the alert. -1. In the **Alert logic** section, set the **Alert logic**: [**Operator**, **Threshold Value**](./alerts-unified-log.md#threshold-and-operator), and [**Frequency**](./alerts-unified-log.md#frequency). +1. (Optional) In the **Split by dimensions** section, you can create resource-centric alerts at scale for a subscription or resource group. Splitting by dimensions groups combinations of numerical or string columns to monitor for the same condition on multiple Azure resources. - :::image type="content" source="media/alerts-log/alerts-rule-preview-agg-params-and-splitting.png" alt-text="Preview alert rule parameters."::: + If you select more than one dimension value, each time series that results from the combination triggers its own alert and is charged separately. The alert payload includes the combination that triggered the alert. -1. (Optional) In the **Advanced options** section, set the [**Number of violations to trigger the alert**](./alerts-unified-log.md#number-of-violations-to-trigger-alert). + You can select up to six more splittings for any number or text columns types. + + You can also decide **not** to split when you want a condition applied to multiple resources in the scope. For example, if you want to fire an alert if at least five machines in the resource group scope have CPU usage over 80%. + + Select values for these fields: + + |Field |Description | + |---------|---------| + |Dimension name|Dimensions can be either number or string columns. Dimensions are used to monitor specific time series and provide context to a fired alert.
    Splitting on the Azure Resource ID column makes the specified resource into the alert target. If an Resource ID column is detected, it is selected automatically and changes the context of the fired alert to the record's resource. | + |Operator|The operator used on the dimension name and value. | + |Dimension values|The dimension values are based on data from the last 48 hours. Select **Add custom value** to add custom dimension values. | + + :::image type="content" source="media/alerts-log/alerts-create-log-rule-dimensions.png" alt-text="Screenshot of the splitting by dimensions section of a new log alert rule."::: - :::image type="content" source="media/alerts-log/alerts-rule-preview-advanced-options.png" alt-text="Advanced options."::: +1. In the **Alert logic** section, select values for these fields: + + |Field |Description | + |---------|---------| + |Operator| The query results are transformed into a number. In this field, select the operator to use to compare the number against the threshold.| + |Threshold value| A number value for the threshold. | + |Frequency of evaluation|The interval in which the query is run. Can be set from a minute to a day. | + + :::image type="content" source="media/alerts-log/alerts-create-log-rule-logic.png" alt-text="Screenshot of alert logic section of a new log alert rule."::: + +1. (Optional) In the **Advanced options** section, you can specify the number of failures and the alert evaluation period required to trigger an alert. For example, if you set the **Aggregation granularity** to 5 minutes, you can specify that you only want to trigger an alert if there were three failures (15 minutes) in the last hour. This setting is defined by your application business policy. + + Select values for these fields under **Number of violations to trigger the alert**: + + |Field |Description | + |---------|---------| + |Number of violations|The number of violations that have to occur to trigger the alert.| + |Evaluation period|The amount of time within which those violations have to occur. | + |Override query time range| Enter a value for this field if the alert evaluation period is different than the query time range.| + + :::image type="content" source="media/alerts-log/alerts-rule-preview-advanced-options.png" alt-text="Screenshot of the advanced options section of a new log alert rule."::: 1. The **Preview** chart shows query evaluations results over time. You can change the chart period or select different time series that resulted from unique alert splitting by dimensions. - :::image type="content" source="media/alerts-log/alerts-create-alert-rule-preview.png" alt-text="Alert rule preview."::: + :::image type="content" source="media/alerts-log/alerts-create-alert-rule-preview.png" alt-text="Screenshot of a preview of a new alert rule."::: 1. From this point on, you can select the **Review + create** button at any time. 1. In the **Actions** tab, select or create the required [action groups](./action-groups.md). @@ -72,12 +94,12 @@ You can also [create log alert rules using Azure Resource Manager templates](../ :::image type="content" source="media/alerts-log/alerts-rule-actions-tab.png" alt-text="Actions tab."::: 1. In the **Details** tab, define the **Project details** and the **Alert rule details**. -1. (Optional) In the **Advanced options** section, you can set several options, including whether to **Enable upon creation**, or to [**Mute actions**](./alerts-unified-log.md#state-and-resolving-alerts) for a period after the alert rule fires. +1. 
(Optional) In the **Advanced options** section, you can set several options, including whether to **Enable upon creation**, or to **Mute actions** for a period of time after the alert rule fires. :::image type="content" source="media/alerts-log/alerts-rule-details-tab.png" alt-text="Details tab."::: -> [!NOTE] -> If you, or your administrator assigned the Azure Policy **Azure Log Search Alerts over Log Analytics workspaces should use customer-managed keys**, you must select **Check workspace linked storage** option in **Advanced options**, or the rule creation will fail as it will not meet the policy requirements. + > [!NOTE] + > If you, or your administrator assigned the Azure Policy **Azure Log Search Alerts over Log Analytics workspaces should use customer-managed keys**, you must select **Check workspace linked storage** option in **Advanced options**, or the rule creation will fail as it will not meet the policy requirements. 1. In the **Tags** tab, set any required tags on the alert rule resource. @@ -88,6 +110,20 @@ You can also [create log alert rules using Azure Resource Manager templates](../ :::image type="content" source="media/alerts-log/alerts-rule-review-create.png" alt-text="Review and create tab."::: +> [!NOTE] +> This section above describes creating alert rules using the new alert rule wizard. +> The new alert rule experience is a little different than the old experience. Please note these changes: +> - Previously, search results were included in the payloads of the triggered alert and its associated notifications. This was a limited and error prone solution. To get detailed context information about the alert so that you can decide on the appropriate action : +> - The recommended best practice it to use [Dimensions](alerts-unified-log.md#split-by-alert-dimensions). Dimensions provide the column value that fired the alert, giving you context for why the alert fired and how to fix the issue. +> - When you need to investigate in the logs, use the link in the alert to the search results in Logs. +> - If you need the raw search results or for any other advanced customizations, use Logic Apps. +> - The new alert rule wizard does not support customization of the JSON payload. +> - Use custom properties in the [new API](/rest/api/monitor/scheduledqueryrule-2021-08-01/scheduled-query-rules/create-or-update#actions) to add static parameters and associated values to the webhook actions triggered by the alert. +> - For more advanced customizations, use Logic Apps. +> - The new alert rule wizard does not support customization of the email subject. +> - Customers often use the custom email subject to indicate the resource on which the alert fired, instead of using the Log Analytics workspace. Use the [new API](alerts-unified-log.md#split-by-alert-dimensions) to trigger an alert of the desired resource using the resource id column. +> - For more advanced customizations, use Logic Apps. + ## Enable recommended out-of-the-box alert rules in the Azure portal (preview) > [!NOTE] > The alert rule recommendations feature is currently in preview and is only enabled for VMs. @@ -165,7 +201,7 @@ az deployment group create \ On success for creation, 201 is returned. On success for update, 200 is returned. ## Next steps -* Learn about [log alerts](./alerts-unified-log.md). +* Learn about [Log alerts](alerts-types.md#log-alerts). * Create log alerts using [Azure Resource Manager Templates](./alerts-log-create-templates.md). * Understand [webhook actions for log alerts](./alerts-log-webhook.md). 
* Learn more about [log queries](../logs/log-query-overview.md). diff --git a/articles/azure-monitor/alerts/alerts-metric-overview.md b/articles/azure-monitor/alerts/alerts-metric-overview.md deleted file mode 100644 index a6407880caa4..000000000000 --- a/articles/azure-monitor/alerts/alerts-metric-overview.md +++ /dev/null @@ -1,184 +0,0 @@ ---- -title: Understand how metric alerts work in Azure Monitor. -description: Get an overview of what you can do with metric alerts and how they work in Azure Monitor. -ms.date: 10/14/2021 -ms.topic: conceptual - ---- - -# Understand how metric alerts work in Azure Monitor - -Metric alerts in Azure Monitor work on top of multi-dimensional metrics. These metrics could be [platform metrics](alerts-metric-near-real-time.md#metrics-and-dimensions-supported), [custom metrics](../essentials/metrics-custom-overview.md), [popular logs from Azure Monitor converted to metrics](./alerts-metric-logs.md) and Application Insights metrics. Metric alerts evaluate at regular intervals to check if conditions on one or more metric time-series are true and notify you when the evaluations are met. Metric alerts are stateful by default, that is, they only send out notifications when the state changes (fired, resolved). If you want to make them stateless, see [make metric alerts occur every time my condition is met](alerts-troubleshoot-metric.md#make-metric-alerts-occur-every-time-my-condition-is-met). - -## How do metric alerts work? - -You can define a metric alert rule by specifying a target resource to be monitored, metric name, condition type (static or dynamic), and the condition (an operator and a threshold/sensitivity) and an action group to be triggered when the alert rule fires. Condition types affect the way thresholds are determined. [Learn more about Dynamic Thresholds condition type and sensitivity options](../alerts/alerts-dynamic-thresholds.md). - -### Alert rule with static condition type - -Let's say you have created a simple static threshold metric alert rule as follows: - -- Target Resource (the Azure resource you want to monitor): myVM -- Metric: Percentage CPU -- Condition Type: Static -- Aggregation type (a statistic that is run over raw metric values. [Supported aggregation types](../essentials/metrics-aggregation-explained.md#aggregation-types) are Minimum, Maximum, Average, Total, Count): Average -- Period (the look back window over which metric values are checked): Over the last 5 mins -- Frequency (the frequency with which the metric alert checks if the conditions are met): 1 min -- Operator: Greater Than -- Threshold: 70 - -From the time the alert rule is created, the monitor runs every 1 min and looks at metric values for the last 5 minutes and checks if the average of those values exceeds 70. If the condition is met that is, the average Percentage CPU for the last 5 minutes exceeds 70, the alert rule fires an activated notification. If you have configured an email or a web hook action in the action group associated with the alert rule, you will receive an activated notification on both. - -When you are using multiple conditions in one rule, the rule "ands" the conditions together. That is, an alert fires when all the conditions in the alert rule evaluate as true and resolve when one of the conditions is no longer true. An example for this type of alert rule would be to monitor an Azure virtual machine and alert when both "Percentage CPU is higher than 90%" and "Queue length is over 300 items". 
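To make the static-threshold example above concrete, a minimal Resource Manager sketch of that rule might look like the following. The resource IDs and severity are illustrative placeholders; `windowSize` and `evaluationFrequency` correspond to the 5-minute period and 1-minute frequency described above:

```json
{
  "type": "Microsoft.Insights/metricAlerts",
  "apiVersion": "2018-03-01",
  "name": "high-cpu-on-myvm",
  "location": "global",
  "properties": {
    "severity": 3,
    "enabled": true,
    "scopes": [ "/subscriptions/<subscription-id>/resourceGroups/<resource-group>/providers/Microsoft.Compute/virtualMachines/myVM" ],
    "evaluationFrequency": "PT1M",
    "windowSize": "PT5M",
    "criteria": {
      "odata.type": "Microsoft.Azure.Monitor.SingleResourceMultipleMetricCriteria",
      "allOf": [
        {
          "name": "HighCpu",
          "metricName": "Percentage CPU",
          "timeAggregation": "Average",
          "operator": "GreaterThan",
          "threshold": 70,
          "criterionType": "StaticThresholdCriterion"
        }
      ]
    },
    "actions": [
      { "actionGroupId": "/subscriptions/<subscription-id>/resourceGroups/<resource-group>/providers/Microsoft.Insights/actionGroups/<action-group>" }
    ]
  }
}
```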
- -### Alert rule with dynamic condition type - -Let's say you have created a simple Dynamic Thresholds metric alert rule as follows: - -- Target Resource (the Azure resource you want to monitor): myVM -- Metric: Percentage CPU -- Condition Type: Dynamic -- Aggregation Type (a statistic that is run over raw metric values. [Supported aggregation types](../essentials/metrics-aggregation-explained.md#aggregation-types) are Minimum, Maximum, Average, Total, Count): Average -- Period (the look back window over which metric values are checked): Over the last 5 mins -- Frequency (the frequency with which the metric alert checks if the conditions are met): 1 min -- Operator: Greater Than -- Sensitivity: Medium -- Look Back Periods: 4 -- Number of Violations: 4 - -Once the alert rule is created, the Dynamic Thresholds machine learning algorithm will acquire historical data that is available, calculate threshold that best fits the metric series behavior pattern and will continuously learn based on new data to make the threshold more accurate. - -From the time the alert rule is created, the monitor runs every 1 min and looks at metric values in the last 20 minutes grouped into 5 minutes periods and checks if the average of the period values in each of the 4 periods exceeds the expected threshold. If the condition is met that is, the average Percentage CPU in the last 20 minutes (four 5 minutes periods) deviated from expected behavior four times, the alert rule fires an activated notification. If you have configured an email or a web hook action in the action group associated with the alert rule, you will receive an activated notification on both. - -### View and resolution of fired alerts - -The above examples of alert rules firing can also be viewed in the Azure portal in the **All Alerts** blade. - -Say the usage on "myVM" continues being above the threshold in subsequent checks, the alert rule will not fire again until the conditions are resolved. - -After some time, the usage on "myVM" comes back down to normal (goes below the threshold). The alert rule monitors the condition for two more times, to send out a resolved notification. The alert rule sends out a resolved/deactivated message when the alert condition is not met for three consecutive periods to reduce noise in case of flapping conditions. - -As the resolved notification is sent out via web hooks or email, the status of the alert instance (called monitor state) in Azure portal is also set to resolved. - -> [!NOTE] -> -> When an alert rule monitors multiple conditions, a fired alert will be resolved if at least one of the conditions is no longer met for three consecutive periods. - -### Using dimensions - -Metric alerts in Azure Monitor also support monitoring multiple dimensions value combinations with one rule. Let's understand why you might use multiple dimension combinations with the help of an example. - -Say you have an App Service plan for your website. You want to monitor CPU usage on multiple instances running your web site/app. You can do that using a metric alert rule as follows: - -- Target resource: myAppServicePlan -- Metric: Percentage CPU -- Condition Type: Static -- Dimensions - - Instance = InstanceName1, InstanceName2 -- Aggregation Type: Average -- Period: Over the last 5 mins -- Frequency: 1 min -- Operator: GreaterThan -- Threshold: 70 - -Like before, this rule monitors if the average CPU usage for the last 5 minutes exceeds 70%. However, with the same rule you can monitor two instances running your website. 
Each instance will get monitored individually and you will get notifications individually. - -Say you have a web app that is seeing massive demand and you will need to add more instances. The above rule still monitors just two instances. However, you can create a rule as follows: - -- Target resource: myAppServicePlan -- Metric: Percentage CPU -- Condition Type: Static -- Dimensions - - Instance = * -- Aggregation Type: Average -- Period: Over the last 5 mins -- Frequency: 1 min -- Operator: GreaterThan -- Threshold: 70 - -This rule will automatically monitor all values for the instance i.e you can monitor your instances as they come up without needing to modify your metric alert rule again. - -When monitoring multiple dimensions, Dynamic Thresholds alerts rule can create tailored thresholds for hundreds of metric series at a time. Dynamic Thresholds results in fewer alert rules to manage and significant time saving on management and creation of alerts rules. - -Say you have a web app with many instances and you don't know what the most suitable threshold is. The above rules will always use threshold of 70%. However, you can create a rule as follows: - -- Target resource: myAppServicePlan -- Metric: Percentage CPU -- Condition Type: Dynamic -- Dimensions - - Instance = * -- Aggregation Type: Average -- Period: Over the last 5 mins -- Frequency: 1 min -- Operator: GreaterThan -- Sensitivity: Medium -- Look Back Periods: 1 -- Number of Violations: 1 - -This rule monitors if the average CPU usage for the last 5 minutes exceeds the expected behavior for each instance. The same rule you can monitor instances as they come up without needing to modify your metric alert rule again. Each instance will get a threshold that fits the metric series behavior pattern and will continuously change based on new data to make the threshold more accurate. Like before, each instance will be monitored individually and you will get notifications individually. - -Increasing look-back periods and number of violations can also allow filtering alerts to only alert on your definition of a significant deviation. [Learn more about Dynamic Thresholds advanced options](../alerts/alerts-dynamic-thresholds.md#what-do-the-advanced-settings-in-dynamic-thresholds-mean). - -> [!NOTE] -> -> We recommend choosing an *Aggregation granularity (Period)* that is larger than the *Frequency of evaluation*, to reduce the likelihood of missing the first evaluation of added time series in the following cases: -> - Metric alert rule that monitors multiple dimensions – When a new dimension value combination is added -> - Metric alert rule that monitors multiple resources – When a new resource is added to the scope -> - Metric alert rule that monitors a metric that isn’t emitted continuously (sparse metric) – When the metric is emitted after a period longer than 24 hours in which it wasn’t emitted - -## Monitoring at scale using metric alerts in Azure Monitor - -So far, you have seen how a single metric alert could be used to monitor one or many metric time-series related to a single Azure resource. Many times, you might want the same alert rule applied to many resources. Azure Monitor also supports monitoring multiple resources (of the same type) with one metric alert rule, for resources that exist in the same Azure region. 
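For the multi-resource scenario described above, only a few properties of the rule change relative to the single-resource sketch earlier: the `scopes` list contains several resources, the target resource type and region are stated explicitly, and the criteria use the multi-resource criteria type. A partial, illustrative sketch (properties only; resource IDs and region are placeholders):

```json
{
  "scopes": [
    "/subscriptions/<subscription-id>/resourceGroups/<resource-group>/providers/Microsoft.Compute/virtualMachines/vm1",
    "/subscriptions/<subscription-id>/resourceGroups/<resource-group>/providers/Microsoft.Compute/virtualMachines/vm2"
  ],
  "targetResourceType": "Microsoft.Compute/virtualMachines",
  "targetResourceRegion": "eastus",
  "criteria": {
    "odata.type": "Microsoft.Azure.Monitor.MultipleResourceMultipleMetricCriteria",
    "allOf": [
      {
        "name": "HighCpu",
        "metricName": "Percentage CPU",
        "timeAggregation": "Average",
        "operator": "GreaterThan",
        "threshold": 70,
        "criterionType": "StaticThresholdCriterion"
      }
    ]
  }
}
```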
- -This feature is currently supported for platform metrics (not custom metrics) for the following services in the following Azure clouds: - -| Service | Public Azure | Government | China | -|:--------|:--------|:--------|:--------| -| Virtual machines1 | **Yes** | **Yes** | **Yes** | -| SQL server databases | **Yes** | **Yes** | **Yes** | -| SQL server elastic pools | **Yes** | **Yes** | **Yes** | -| NetApp files capacity pools | **Yes** | **Yes** | **Yes** | -| NetApp files volumes | **Yes** | **Yes** | **Yes** | -| Key vaults | **Yes** | **Yes** | **Yes** | -| Azure Cache for Redis | **Yes** | **Yes** | **Yes** | -| Data box edge devices | **Yes** | **Yes** | **Yes** | -| Recovery Services vaults | **Yes** | **No** | **No** | - -1 Not supported for virtual machine network metrics (Network In Total, Network Out Total, Inbound Flows, Outbound Flows, Inbound Flows Maximum Creation Rate, Outbound Flows Maximum Creation Rate). - -You can specify the scope of monitoring by a single metric alert rule in one of three ways. For example, with virtual machines you can specify the scope as: - -- a list of virtual machines (in one Azure region) within a subscription -- all virtual machines (in one Azure region) in one or more resource groups in a subscription -- all virtual machines (in one Azure region) in a subscription - -> [!NOTE] -> -> The scope of a multi-resource metric alert rule must contain at least one resource of the selected resource type. - -Creating metric alert rules that monitor multiple resources is like [creating any other metric alert](../alerts/alerts-metric.md) that monitors a single resource. Only difference is that you would select all the resources you want to monitor. You can also create these rules through [Azure Resource Manager templates](./alerts-metric-create-templates.md#template-for-a-metric-alert-that-monitors-multiple-resources). You will receive individual notifications for each monitored resource. - -> [!NOTE] -> -> In a metric alert rule that monitors multiple resources, only one condition is allowed. - -## Typical latency - -For metric alerts, typically you will get notified in under 5 minutes if you set the alert rule frequency to be 1 min. In cases of heavy load for notification systems, you might see a longer latency. - -## Supported resource types for metric alerts - -You can find the full list of supported resource types in this [article](./alerts-metric-near-real-time.md#metrics-and-dimensions-supported). - -## Pricing model - -Each Metrics Alert rule is billed based for time series monitored. Prices for Metric Alert rules are available on the [Azure Monitor pricing page](https://azure.microsoft.com/pricing/details/monitor/). 
- -## Next steps - -- [Learn how to create, view, and manage metric alerts in Azure](../alerts/alerts-metric.md) -- [Learn how to create alerts within Azure Monitor Metrics Explorer](../essentials/metrics-charts.md#alert-rules) -- [Learn how to deploy metric alerts using Azure Resource Manager templates](./alerts-metric-create-templates.md) -- [Learn more about action groups](./action-groups.md) -- [Learn more about Dynamic Thresholds condition type](../alerts/alerts-dynamic-thresholds.md) -- [Learn more about troubleshooting problems in metric alerts](alerts-troubleshoot-metric.md) diff --git a/articles/azure-monitor/alerts/alerts-overview.md b/articles/azure-monitor/alerts/alerts-overview.md index 0b22f92374d6..baaec3d5aeed 100644 --- a/articles/azure-monitor/alerts/alerts-overview.md +++ b/articles/azure-monitor/alerts/alerts-overview.md @@ -1,184 +1,104 @@ --- -title: Overview of alerting and notification monitoring in Azure -description: Overview of alerting in Azure Monitor -ms.topic: conceptual -ms.date: 02/14/2021 - +title: Overview of Azure Monitor Alerts +description: Learn about Azure Monitor alerts, alert rules, action processing rules, and action groups. You will learn how all of these work together to monitor your system and notify you if something is wrong. +author: AbbyMSFT +ms.author: abbyweisberg +ms.topic: overview +ms.date: 04/26/2022 +ms.custom: template-overview +ms.reviewer: harelb --- +# What are Azure Monitor Alerts? -# Overview of alerts in Microsoft Azure - -This article describes what alerts are, their benefits, and how to get started using them. - -## What are alerts in Microsoft Azure? - -Alerts proactively notify you when issues are found with your infrastructure or application using your monitoring data in Azure Monitor. They allow you to identify and address issues before the users of your system notice them. - -## Overview - -The diagram below represents the flow of alerts. - -![Diagram of alert flow](media/alerts-overview/Azure-Monitor-Alerts.svg) - -Alert rules are separated from alerts and the actions taken when an alert fires. The alert rule captures the target and criteria for alerting. The alert rule can be in an enabled or a disabled state. Alerts only fire when enabled. - -The following are key attributes of an alert rule: - -**Target Resource** - Defines the scope and signals available for alerting. A target can be any Azure resource. Example targets: - -- Virtual machines. -- Storage accounts. -- Log Analytics workspace. -- Application Insights. - -For certain resources (like virtual machines), you can specify multiple resources as the target of the alert rule. +Alerts help you detect and address issues before users notice them by proactively notifying you when Azure Monitor data indicates that there may be a problem with your infrastructure or application. -**Signal** - Emitted by the target resource. Signals can be of the following types: metric, activity log, Application Insights, and log. +You can alert on any metric or log data source in the Azure Monitor data platform. -**Criteria** - A combination of signal and logic applied on a target resource. Examples: +This diagram shows you how alerts work: -- Percentage CPU > 70% -- Server Response Time > 4 ms -- Result count of a log query > 100 +:::image type="content" source="media/alerts-overview/alerts-flow.png" alt-text="Graphic explaining Azure Monitor alerts."::: -**Alert Name** - A specific name for the alert rule configured by the user. 
- -**Alert Description** - A description for the alert rule configured by the user. - -**Severity** - The severity of the alert after the criteria specified in the alert rule is met. Severity can range from 0 to 4. - -- Sev 0 = Critical -- Sev 1 = Error -- Sev 2 = Warning -- Sev 3 = Informational -- Sev 4 = Verbose - -**Action** - A specific action taken when the alert is fired. For more information, see [Action Groups](../alerts/action-groups.md). - -## What you can alert on - -You can alert on metrics and logs, as described in [monitoring data sources](./../agents/data-sources.md). Signals include but aren't limited to: +An **alert rule** monitors your telemetry and captures a signal that indicates that something is happening on a specified target. The alert rule captures the signal and checks to see if the signal meets the criteria of the condition. If the conditions are met, an alert is triggered, which initiates the associated action group and updates the state of the alert. + +You create an alert rule by combining: + - The resource(s) to be monitored. + - The signal or telemetry from the resource + - Conditions + +If you're monitoring more than one resource, the condition is evaluated separately for each of the resources and alerts are fired for each resource separately. + +Once an alert is triggered, the alert is made up of: + - An **alert processing rule** allows you to apply processing on fired alerts. Alert processing rules modify the fired alerts as they are being fired. You can use alert processing rules to add or suppress action groups, apply filters or have the rule processed on a pre-defined schedule. + - An **action group** can trigger notifications or an automated workflow to let users know that an alert has been triggered. Action groups can include: + - Notification methods such as email, SMS, and push notifications. + - Automation Runbooks + - Azure functions + - ITSM incidents + - Logic Apps + - Secure webhooks + - Webhooks + - Event hubs +- The **alert condition** is set by the system. When an alert fires, the alert’s monitor condition is set to ‘fired’, and when the underlying condition that caused the alert to fire clears, the monitor condition is set to ‘resolved’. +- The **user response** is set by the user and doesn’t change until the user changes it. + +You can see all alert instances in all your Azure resources generated in the last 30 days on the **[Alerts page](alerts-page.md)** in the Azure portal. +## Types of alerts + +There are four types of alerts. This table provides a brief description of each alert type. +See [this article](alerts-types.md) for detailed information about each alert type and how to choose which alert type best suits your needs. + +|Alert type|Description| +|:---------|:---------| +|[Metric alerts](alerts-types.md#metric-alerts)|Metric alerts evaluate resource metrics at regular intervals. Metrics can be platform metrics, custom metrics, logs from Azure Monitor converted to metrics or Application Insights metrics. 
Metric alerts have several additional features (link), such as the ability to apply multiple conditions and dynamic thresholds.| +|[Log alerts](alerts-types.md#log-alerts)|Log alerts allow users to use a Log Analytics query to evaluate resource logs at a predefined frequency.| +|[Activity log alerts](alerts-types.md#activity-log-alerts)|Activity log alerts are triggered when a new activity log event occurs that matches the defined conditions.| +|[Smart detection alerts](alerts-types.md#smart-detection-alerts)|Smart detection on an Application Insights resource automatically warns you of potential performance problems and failure anomalies in your web application. You can migrate smart detection on your Application Insights resource to create alert rules for the different smart detection modules.| +## Out-of-the-box alert rules (preview) + +If you don't have alert rules defined for the selected resource, you can [enable recommended out-of-the-box alert rules in the Azure portal](alerts-log.md#enable-recommended-out-of-the-box-alert-rules-in-the-azure-portal-preview). -- Metric values -- Log search queries -- Activity log events -- Health of the underlying Azure platform -- Tests for website availability -## Alerts experience -### Alerts page -The Alerts page provides a summary of the alerts created in the last 24 hours. -### Alert Recommendations (preview) > [!NOTE] > The alert rule recommendations feature is currently in preview and is only enabled for VMs. -If you don't have alert rules defined for the selected resource, either individually or as part of a resource group or subscription, you can [create a new alert rule](alerts-log.md#create-a-new-log-alert-rule-in-the-azure-portal), or [enable recommended out-of-the-box alert rules in the Azure portal (preview)](alerts-log.md#enable-recommended-out-of-the-box-alert-rules-in-the-azure-portal-preview). - -:::image type="content" source="media/alerts-managing-alert-instances/enable-recommended-alert-rules.jpg" alt-text="Screenshot of alerts page with link to recommended alert rules."::: -### Alerts summary pane -If you have alerts configured for this resource, the alerts summary pane summarizes the alerts fired in the last 24 hours. You can filter the list by the subscription or any of the filter parameters at the top of the page. The page displays the total alerts for each severity. Select a severity to filter the alerts by that severity. -> [!NOTE] - > You can only access alerts generated in the last 30 days. - -You can also [programmatically enumerate the alert instances generated on your subscriptions by using REST APIs](#manage-your-alert-instances-programmatically). - -:::image type="content" source="media/alerts-overview/alerts-page.png" alt-text="Screenshot of alerts page."::: +## Azure role-based access control (Azure RBAC) for alerts -You can narrow down the list by selecting values from any of these filters at the top of the page: +You can only access, create, or manage alerts for resources for which you have permissions. 
+To create an alert rule, you need to have the following permissions: + - Read permission on the target resource of the alert rule + - Write permission on the resource group in which the alert rule is created (if you’re creating the alert rule from the Azure portal, the alert rule is created by default in the same resource group in which the target resource resides) + - Read permission on any action group associated to the alert rule (if applicable) +These built-in Azure roles, supported at all Azure Resource Manager scopes, have permissions to and access alerts information and create alert rules: + - monitoring contributor + - monitoring reader -| Column | Description | -|:---|:---| -| Subscription | Select the Azure subscriptions for which you want to view the alerts. You can optionally choose to select all your subscriptions. Only alerts that you have access to in the selected subscriptions are included in the view. | -| Resource group | Select a single resource group. Only alerts with targets in the selected resource group are included in the view. | -| Resource type | Select one or more resource types. Only alerts with targets of the selected type are included in the view. This column is only available after a resource group has been specified. | -| Resource | Select a resource. Only alerts with that resource as a target are included in the view. This column is only available after a resource type has been specified. | -| Severity | Select an alert severity, or select **All** to include alerts of all severities. | -| Alert condition | Select an alert condition, or select **All** to include alerts of all conditions. | -| User response | Select a user response, or select **All** to include alerts of all user responses. | -| Monitor service | Select a service, or select **All** to include all services. Only alerts created by rules that use service as a target are included. | -| Time range | Only alerts fired within the selected time range are included in the view. Supported values are the past hour, the past 24 hours, the past seven days, and the past 30 days. | +## Alerts and State -Select **Columns** at the top of the page to select which columns to show. -### Alert details pane +You can configure whether log or metric alerts are stateful or stateless. Activity log alerts are stateless. +- Stateless alerts fire each time the condition is met, even if fired previously. +- Stateful alerts fire when the condition is met and then don't fire again or trigger any more actions until the conditions are resolved. +For stateful alerts, the alert is considered resolved when: -When you select an alert, this alert details pane provides details of the alert and enables you to change how you want to respond to the alert. - -:::image type="content" source="media/alerts-overview/alert-detail-pane.png" alt-text="Screenshot of alert details pane."::: - -The Alert details pane includes: - - -|Section |Description | +|Alert type |The alert is resolved when | |---------|---------| -|Summary | Displays the properties and other significant information about the alert. | -|History | Lists all actions on the alert and any changes made to the alert. | -## Manage alerts - -You can set the user response of an alert to specify where it is in the resolution process. When the criteria specified in the alert rule is met, an alert is created or fired, and it has a status of *New*. You can change the status when you acknowledge an alert and when you close it. 
All user response changes are stored in the history of the alert. - -The following user responses are supported. - -| User Response | Description | -|:---|:---| -| New | The issue has been detected and hasn't yet been reviewed. | -| Acknowledged | An administrator has reviewed the alert and started working on it. | -| Closed | The issue has been resolved. After an alert has been closed, you can reopen it by changing it to another user response. | - -The *user response* is different and independent of the *alert condition*. The response is set by the user, while the alert condition is set by the system. When an alert fires, the alert's alert condition is set to *'fired'*, and when the underlying condition that caused the alert to fire clears, the alert condition is set to *'resolved'*. -## Manage alert rules - -To show the **Rules** page, select **Manage alert rules**. The Rules page is a single place for managing all alert rules across your Azure subscriptions. It lists all alert rules and can be sorted based on target resources, resource groups, rule name, or status. You can also edit, enable, or disable alert rules from this page. - - :::image type="content" source="media/alerts-overview/alerts-rules.png" alt-text="Screenshot of alert rules page."::: -## Create an alert rule -You can author alert rules in a consistent manner, whatever of the monitoring service or signal type. - -> [!VIDEO https://www.microsoft.com/en-us/videoplayer/embed/RE4tflw] - - -Here's how to create a new alert rule: -1. Pick the _target_ for the alert. -1. Select the _signal_ from the available signals for the target. -1. Specify the _logic_ to be applied to data from the signal. - -This simplified authoring process no longer requires you to know the monitoring source or signals that are supported before selecting an Azure resource. The list of available signals is automatically filtered based on the target resource that you select. Also based on that target, you're guided through defining the logic of the alert rule automatically. - -You can learn more about how to create alert rules in [Create, view, and manage alerts using Azure Monitor](../alerts/alerts-metric.md). - -Alerts are available across several Azure monitoring services. For information about how and when to use each of these services, see [Monitoring Azure applications and resources](../overview.md). - -## Azure role-based access control (Azure RBAC) for your alert instances - -The consumption and management of alert instances requires the user to have the Azure built-in roles of either [monitoring contributor](../../role-based-access-control/built-in-roles.md#monitoring-contributor) or [monitoring reader](../../role-based-access-control/built-in-roles.md#monitoring-reader). These roles are supported at any Azure Resource Manager scope, from the subscription level to granular assignments at a resource level. For example, if a user only has monitoring contributor access for virtual machine `ContosoVM1`, that user can consume and manage only alerts generated on `ContosoVM1`. - -## Manage your alert instances programmatically - -You might want to query programmatically for alerts generated against your subscription. Queries might be to create custom views outside of the Azure portal, or to analyze your alerts to identify patterns and trends. - -We recommended that you use [Azure Resource Graph](../../governance/resource-graph/overview.md) with the `AlertsManagementResources` schema for querying fired alerts. 
Resource Graph is recommended when you have to manage alerts generated across multiple subscriptions. - -The following sample request to the Resource Graph REST API returns alerts within one subscription in the last day: - -```json -{ - "subscriptions": [ - - ], - "query": "alertsmanagementresources | where properties.essentials.lastModifiedDateTime > ago(1d) | project alertInstanceId = id, parentRuleId = tolower(tostring(properties['essentials']['alertRule'])), sourceId = properties['essentials']['sourceCreatedId'], alertName = name, severity = properties.essentials.severity, status = properties.essentials.monitorCondition, state = properties.essentials.alertState, affectedResource = properties.essentials.targetResourceName, monitorService = properties.essentials.monitorService, signalType = properties.essentials.signalType, firedTime = properties['essentials']['startDateTime'], lastModifiedDate = properties.essentials.lastModifiedDateTime, lastModifiedBy = properties.essentials.lastModifiedUserName" -} -``` +|Metric alerts|The alert condition isn't met for three consecutive checks.| +|Log alerts|The alert condition isn't met for 30 minutes for a specific evaluation period (to account for log ingestion delay), and
    the alert condition isn't met for three consecutive checks.| -You can also see the result of this Resource Graph query in the portal with Azure Resource Graph Explorer: [portal.azure.com](https://portal.azure.com/?feature.customportal=false#blade/HubsExtension/ArgQueryBlade/query/alertsmanagementresources%0A%7C%20where%20properties.essentials.lastModifiedDateTime%20%3E%20ago(1d)%0A%7C%20project%20alertInstanceId%20%3D%20id%2C%20parentRuleId%20%3D%20tolower(tostring(properties%5B'essentials'%5D%5B'alertRule'%5D))%2C%20sourceId%20%3D%20properties%5B'essentials'%5D%5B'sourceCreatedId'%5D%2C%20alertName%20%3D%20name%2C%20severity%20%3D%20properties.essentials.severity%2C%20status%20%3D%20properties.essentials.monitorCondition%2C%20state%20%3D%20properties.essentials.alertState%2C%20affectedResource%20%3D%20properties.essentials.targetResourceName%2C%20monitorService%20%3D%20properties.essentials.monitorService%2C%20signalType%20%3D%20properties.essentials.signalType%2C%20firedTime%20%3D%20properties%5B'essentials'%5D%5B'startDateTime'%5D%2C%20lastModifiedDate%20%3D%20properties.essentials.lastModifiedDateTime%2C%20lastModifiedBy%20%3D%20properties.essentials.lastModifiedUserName) +When the alert is considered resolved, the alert rule sends out a resolved notification using webhooks or email and the monitor state in the Azure portal is set to resolved. -You can also use the [Alert Management REST API](/rest/api/monitor/alertsmanagement/alerts) in lower scale querying scenarios or to update fired alerts. +## Manage your alerts programmatically -## Smart groups +You can programmatically query for alerts using: + - [Azure PowerShell](/powershell/module/az.monitor/) + - [The Azure CLI](/cli/azure/monitor?view=azure-cli-latest&preserve-view=true) + - The [Alert Management REST API](/rest/api/monitor/alertsmanagement/alerts) +You can also use [Resource Graphs](https://portal.azure.com/?feature.customportal=false#blade/HubsExtension/ArgQueryBlade). Resource graphs are good for managing alerts across multiple subscriptions. -Smart groups are aggregations of alerts based on machine learning algorithms, which can help reduce alert noise and aid in troubleshooting. [Learn more about Smart Groups](./alerts-smartgroups-overview.md?toc=%2fazure%2fazure-monitor%2ftoc.json) and [how to manage your smart groups](./alerts-managing-smart-groups.md?toc=%2fazure%2fazure-monitor%2ftoc.json). +## Pricing +See the [Azure Monitor pricing page](https://azure.microsoft.com/pricing/details/monitor/) for information about pricing. 
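+As a quick reference for the Azure Resource Graph option mentioned above, here's a minimal sketch of a query against the `alertsmanagementresources` table that returns alerts fired or modified in the last day. The projected properties are examples; adjust the projection and the time window to your needs.
+
+```kusto
+// Alerts changed in the last day, with a few commonly used properties.
+alertsmanagementresources
+| where properties.essentials.lastModifiedDateTime > ago(1d)
+| project alertName = name,
+          severity = properties.essentials.severity,
+          monitorCondition = properties.essentials.monitorCondition,
+          affectedResource = properties.essentials.targetResourceName,
+          monitorService = properties.essentials.monitorService,
+          firedTime = properties.essentials.startDateTime
+```
+
+You can run the same query in Azure Resource Graph Explorer in the Azure portal, or pass it in the `query` field of a Resource Graph REST API request.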
## Next steps -- [Learn more about Smart Groups](./alerts-smartgroups-overview.md?toc=%2fazure%2fazure-monitor%2ftoc.json) +- [See your alert instances](./alerts-page.md) +- [Create a new alert rule](alerts-log.md) - [Learn about action groups](../alerts/action-groups.md) -- [Managing your alert instances in Azure](./alerts-managing-alert-instances.md?toc=%2fazure%2fazure-monitor%2ftoc.json) -- [Managing Smart Groups](./alerts-managing-smart-groups.md?toc=%2fazure%2fazure-monitor%2ftoc.json) -- [Learn more about Azure alerts pricing](https://azure.microsoft.com/pricing/details/monitor/) +- [Learn about alert processing rules](alerts-action-rules.md) diff --git a/articles/azure-monitor/alerts/alerts-managing-alert-instances.md b/articles/azure-monitor/alerts/alerts-page.md similarity index 57% rename from articles/azure-monitor/alerts/alerts-managing-alert-instances.md rename to articles/azure-monitor/alerts/alerts-page.md index 47d497157b30..c5b446cbaac8 100644 --- a/articles/azure-monitor/alerts/alerts-managing-alert-instances.md +++ b/articles/azure-monitor/alerts/alerts-page.md @@ -1,40 +1,35 @@ --- -title: Manage alert instances in Azure Monitor -description: Managing alert instances across Azure +title: View and manage your alert instances +description: The alerts page summarizes all alert instances in all your Azure resources generated in the last 30 days. ms.topic: conceptual ms.date: 2/23/2022 +ms.reviewer: harelb --- -# Manage alert instances with unified alerts +# View and manage your alert instances -With the [unified alerts experience](./alerts-overview.md) in Azure Monitor, you can see all your different types of alerts across Azure. Unified alerts span multiple subscriptions in a single pane. This article shows how you can view your alert instances, and how to find specific alert instances for troubleshooting. +The alerts page summarizes all alert instances in all your Azure resources generated in the last 30 days. You can see all your different types of alerts from multiple subscriptions in a single pane, and you can find specific alert instances for troubleshooting purposes. -> [!NOTE] -> You can only access alerts generated in the last 30 days. - -## Go to the alerts page - -You can go to the alerts page in any of the following ways: +You can get to the alerts page in any of the following ways: -- In the [Azure portal](https://portal.azure.com/), select **Monitor** > **Alerts**. +- From the home page in the [Azure portal](https://portal.azure.com/), select **Monitor** > **Alerts**. - ![Screenshot of Monitor Alerts](media/alerts-managing-alert-instances/monitoring-alerts-managing-alert-instances-toc.jpg) + :::image type="content" source="media/alerts-managing-alert-instances/alerts-monitor-menu.png" alt-text="Screenshot of alerts link on monitor menu. "::: -- Use the context of a specific resource. Open a resource, go to the **Monitoring** section, and choose **Alerts**. The landing page is pre-filtered for alerts on that specific resource. +- From a specific resource, go to the **Monitoring** section, and choose **Alerts**. The landing page is pre-filtered for alerts on that specific resource. 
- ![Screenshot of resource Monitoring Alerts](media/alerts-managing-alert-instances/alert-resource.JPG) + :::image type="content" source="media/alerts-managing-alert-instances/alerts-resource-menu.png" alt-text="Screenshot of alerts link on a resource's menu."::: +## Alert rule recommendations (preview) -## The alerts page - -The **Alerts** page summarizes all your alert instances across Azure. -### Alert Recommendations (preview) > [!NOTE] > The alert rule recommendations feature is currently in preview and is only enabled for VMs. If you don't have alert rules defined for the selected resource, either individually or as part of a resource group or subscription, you can [create a new alert rule](alerts-log.md#create-a-new-log-alert-rule-in-the-azure-portal), or [enable recommended out-of-the-box alert rules in the Azure portal (preview)](alerts-log.md#enable-recommended-out-of-the-box-alert-rules-in-the-azure-portal-preview). :::image type="content" source="media/alerts-managing-alert-instances/enable-recommended-alert-rules.jpg" alt-text="Screenshot of alerts page with link to recommended alert rules."::: -### Alerts summary pane + +## The alerts summary pane + If you have alerts configured for this resource, the alerts summary pane summarizes the alerts fired in the last 24 hours. You can modify the list of alert instances by selecting filters such as **time range**, **subscription**, **alert condition**, **severity**, and more. Select an alert instance. To see more details about a specific alert instance, select the alerts instance to open the **Alert Details** page. @@ -44,6 +39,12 @@ To see more details about a specific alert instance, select the alerts instance :::image type="content" source="media/alerts-managing-alert-instances/alerts-page.png" alt-text="Screenshot of alerts page."::: ## The alerts details page + The **Alerts details** page provides details about the selected alert. Select **Change user response** to change the user response to the alert. You can see all closed alerts in the **History** tab. :::image type="content" source="media/alerts-managing-alert-instances/alerts-details-page.png" alt-text="Screenshot of alerts details page."::: + +## Next steps + +- [Learn about Azure Monitor alerts](./alerts-overview.md) +- [Create a new alert rule](alerts-log.md) \ No newline at end of file diff --git a/articles/azure-monitor/alerts/alerts-troubleshoot-metric.md b/articles/azure-monitor/alerts/alerts-troubleshoot-metric.md index 72e45fbbd7b2..aba573d2ff37 100644 --- a/articles/azure-monitor/alerts/alerts-troubleshoot-metric.md +++ b/articles/azure-monitor/alerts/alerts-troubleshoot-metric.md @@ -4,7 +4,7 @@ description: Common issues with Azure Monitor metric alerts and possible solutio author: harelbr ms.author: harelbr ms.topic: troubleshooting -ms.date: 2/23/2022 +ms.date: 5/25/2022 --- # Troubleshooting problems in Azure Monitor metric alerts @@ -233,6 +233,16 @@ To create a metric alert rule, you’ll need to have the following permissions: - Write permission on the resource group in which the alert rule is created (if you’re creating the alert rule from the Azure portal, the alert rule is created by default in the same resource group in which the target resource resides) - Read permission on any action group associated to the alert rule (if applicable) +## Subscription registration to the Microsoft.Insights resource provider + +Metric alerts can only access resources in subscriptions registered to the Microsoft.Insights resource provider. 
+Therefore, to create a metric alert rule, all involved subscriptions must be registered to this resource provider:
+
+- The subscription containing the alert rule's target resource (scope)
+- The subscription containing the action groups associated with the alert rule (if defined)
+- The subscription in which the alert rule is saved
+
+Learn more about [registering resource providers](../../azure-resource-manager/management/resource-providers-and-types.md).
## Naming restrictions for metric alert rules
@@ -383,4 +393,4 @@ The table below lists the metrics that aren't supported by dynamic thresholds.
## Next steps
-- For general troubleshooting information about alerts and notifications, see [Troubleshooting problems in Azure Monitor alerts](alerts-troubleshoot.md).
+- For general troubleshooting information about alerts and notifications, see [Troubleshooting problems in Azure Monitor alerts](alerts-troubleshoot.md).
\ No newline at end of file
diff --git a/articles/azure-monitor/alerts/alerts-types.md b/articles/azure-monitor/alerts/alerts-types.md
new file mode 100644
index 000000000000..3dd7e79e2b7d
--- /dev/null
+++ b/articles/azure-monitor/alerts/alerts-types.md
@@ -0,0 +1,180 @@
+---
+title: Types of Azure Monitor Alerts
+description: This article explains the different types of Azure Monitor alerts and when to use each type.
+author: AbbyMSFT
+ms.author: abbyweisberg
+ms.topic: conceptual
+ms.date: 04/26/2022
+ms.custom: template-concept
+ms.reviewer: harelb
+---
+
+# Types of Azure Monitor alerts
+
+This article describes the kinds of Azure Monitor alerts you can create, and helps you understand when to use each type of alert.
+
+There are four types of alerts:
+- [Metric alerts](#metric-alerts)
+- [Log alerts](#log-alerts)
+- [Activity log alerts](#activity-log-alerts)
+- [Smart detection alerts](#smart-detection-alerts)
+
+## Choosing the right alert type
+
+This table can help you decide when to use each type of alert. For more detailed information about pricing, see the [pricing page](https://azure.microsoft.com/pricing/details/monitor/).
+
+|Alert type |When to use |Pricing information|
+|---------|---------|---------|
+|Metric alert|Metric alerts are useful when you want to be alerted about data that requires little or no manipulation. Metric data is stored in the system already pre-computed, so metric alerts are less expensive than log alerts. If the data you want to monitor is available in metric data, you would want to use metric alerts.|Each metric alert rule is charged based on the number of time-series that are monitored. |
+|Log alert|Log alerts allow you to perform advanced logic operations on your data. If the data you want to monitor is available in logs, or requires advanced logic, you can use the robust features of KQL for data manipulation using log alerts. Log alerts are more expensive than metric alerts.|Each Log Alert rule is billed based on the interval at which the log query is evaluated (more frequent query evaluation results in a higher cost). Additionally, for Log Alerts configured for [at scale monitoring](#splitting-by-dimensions-in-log-alert-rules), the cost will also depend on the number of time series created by the dimensions resulting from your query. |
+|Activity Log alert|Activity logs provide auditing of all actions that occurred on resources.
Use activity log alerts if you want to be alerted when a specific event happens to a resource, for example, a restart, a shutdown, or the creation or deletion of a resource.|For more information, see the [pricing page](https://azure.microsoft.com/pricing/details/monitor/).| + +## Metric alerts + +A metric alert rule monitors a resource by evaluating conditions on the resource metrics at regular intervals. If the conditions are met, an alert is fired. A metric time-series is a series of metric values captured over a period of time. + +You can create rules using these metrics: +- [Platform metrics](alerts-metric-near-real-time.md#metrics-and-dimensions-supported) +- [Custom metrics](../essentials/metrics-custom-overview.md) +- [Application Insights custom metrics](../app/api-custom-events-metrics.md) +- [Selected logs from a Log Analytics workspace converted to metrics](alerts-metric-logs.md) + +Metric alert rules include these features: +- You can use multiple conditions on an alert rule for a single resource. +- You can add granularity by [monitoring multiple metric dimensions](#narrow-the-target-using-dimensions). +- You can use [Dynamic thresholds](#dynamic-thresholds) driven by machine learning. +- You can configure if metric alerts are [stateful or stateless](alerts-overview.md#alerts-and-state). Metric alerts are stateful by default. + +The target of the metric alert rule can be: +- A single resource, such as a VM. See this article for supported resource types. +- [Multiple resources](#monitor-multiple-resources) of the same type in the same Azure region, such as a resource group. + +### Multiple conditions + +When you create an alert rule for a single resource, you can apply multiple conditions. For example, you could create an alert rule to monitor an Azure virtual machine and alert when both "Percentage CPU is higher than 90%" and "Queue length is over 300 items". When an alert rule has multiple conditions, the alert fires when all the conditions in the alert rule are true and is resolved when at least one of the conditions is no longer true for three consecutive checks. +### Narrow the target using Dimensions + +Dimensions are name-value pairs that contain additional data about the metric value. Using dimensions allows you to filter the metrics and monitor specific time-series, instead of monitoring the aggregate of all the dimensional values. +For example, the Transactions metric of a storage account can have an API name dimension that contains the name of the API called by each transaction (for example, GetBlob, DeleteBlob, PutPage). You can choose to have an alert fired when there is a high number of transactions in any API name (which is the aggregated data), or you can use dimensions to further break it down to alert only when the number of transactions is high for specific API names. +If you use more than one dimension, the metric alert rule can monitor multiple dimension values from different dimensions of a metric. +The alert rule separately monitors all the dimensions value combinations. +See [this article](alerts-metric-multiple-time-series-single-rule.md) for detailed instructions on using dimensions in metric alert rules. + +### Create resource-centric alerts using splitting by dimensions + +To monitor for the same condition on multiple Azure resources, you can use splitting by dimensions. Splitting by dimensions allows you to create resource-centric alerts at scale for a subscription or resource group. Alerts are split into separate alerts by grouping combinations. 
Splitting on Azure resource ID column makes the specified resource into the alert target.
+
+You may also decide not to split when you want a condition applied to multiple resources in the scope. For example, you might want to fire an alert if at least five machines in the resource group scope have CPU usage over 80%.
+
+### Monitor multiple resources
+
+You can monitor at scale by applying the same metric alert rule to multiple resources of the same type for resources that exist in the same Azure region. Individual notifications are sent for each monitored resource.
+
+Platform metrics for these services are supported in the following Azure clouds:
+
+| Service | Global Azure | Government | China |
+|:-----------------------------|:-------------|:-----------|:--------|
+| Virtual machines* | Yes | Yes | Yes |
+| SQL server databases | Yes | Yes | Yes |
+| SQL server elastic pools | Yes | Yes | Yes |
+| NetApp files capacity pools | Yes | Yes | Yes |
+| NetApp files volumes | Yes | Yes | Yes |
+| Key vaults | Yes | Yes | Yes |
+| Azure Cache for Redis | Yes | Yes | Yes |
+| Azure Stack Edge devices | Yes | Yes | Yes |
+| Recovery Services vaults | Yes | No | No |
+
+ > [!NOTE]
+ > Platform metrics are not supported for virtual machine network metrics (Network In Total, Network Out Total, Inbound Flows, Outbound Flows, Inbound Flows Maximum Creation Rate, Outbound Flows Maximum Creation Rate).
+
+You can specify the scope of monitoring with a single metric alert rule in one of three ways. For example, with virtual machines you can specify the scope as:
+
+- a list of virtual machines (in one Azure region) within a subscription
+- all virtual machines (in one Azure region) in one or more resource groups in a subscription
+- all virtual machines (in one Azure region) in a subscription
+
+### Dynamic thresholds
+
+Dynamic thresholds use advanced machine learning (ML) to:
+- Learn the historical behavior of metrics
+- Identify patterns and adapt to metric changes over time, such as hourly, daily, or weekly patterns.
+- Recognize anomalies that indicate possible service issues
+- Calculate the most appropriate threshold for the metric
+
+Machine learning continuously uses new data to learn more and make the threshold more accurate. Because the system adapts to the metrics’ behavior over time, and alerts based on deviations from its pattern, you don't have to know the "right" threshold for each metric.
+
+Dynamic thresholds help you:
+- Create scalable alerts for hundreds of metric series with one alert rule. Fewer alert rules means less time spent creating and managing alert rules.
+- Create rules without having to know what threshold to configure
+- Configure metric alerts using high-level concepts without extensive domain knowledge about the metric
+- Prevent noisy (low precision) or wide (low recall) thresholds that don’t have an expected pattern
+- Handle noisy metrics (such as machine CPU or memory) and metrics with low dispersion (such as availability and error rate).
+
+See [this article](alerts-dynamic-thresholds.md) for detailed instructions on using dynamic thresholds in metric alert rules.
+
+## Log alerts
+A log alert rule monitors a resource by using a Log Analytics query to evaluate resource logs at a set frequency. If the conditions are met, an alert is fired. Because you can use Log Analytics queries, log alerts allow you to perform advanced logic operations on your data and to use the robust features of KQL for data manipulation of log data.
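+For example, a log alert rule that measures table rows might evaluate a query like the following sketch. The `requests` table and `resultCode` column here assume an Application Insights resource; the alert rule itself supplies the evaluation frequency and the threshold (for instance, greater than 0 results over a 15-minute window).
+
+```kusto
+// Failed requests (HTTP 500). The alert rule counts the rows this query returns
+// and fires when that count crosses the configured threshold.
+requests
+| where resultCode == "500"
+```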
+
+The target of the log alert rule can be:
+- A single resource, such as a VM.
+- Multiple resources of the same type in the same Azure region, such as a resource group. This is currently available for selected resource types.
+- Multiple resources using [cross-resource query](../logs/cross-workspace-query.md#querying-across-log-analytics-workspaces-and-from-application-insights).
+
+Log alerts can measure two different things, which can be used for different monitoring scenarios:
+- Table rows: The number of rows returned can be used to work with events such as Windows event logs, syslog, and application exceptions.
+- Calculation of a numeric column: Calculations based on any numeric column can be used to include any number of resources. For example, CPU percentage.
+
+You can configure if log alerts are [stateful or stateless](alerts-overview.md#alerts-and-state) (currently in preview).
+
+> [!NOTE]
+> Log alerts work best when you are trying to detect specific data in the logs, as opposed to when you are trying to detect a **lack** of data in the logs. Since logs are semi-structured data, they are inherently more latent than metric data on information like a VM heartbeat. To avoid misfires when you are trying to detect a lack of data in the logs, consider using [metric alerts](#metric-alerts). You can send data to the metric store from logs using [metric alerts for logs](alerts-metric-logs.md).
+
+### Dimensions in log alert rules
+You can use dimensions when creating log alert rules to monitor the values of multiple instances of a resource with one rule. For example, you can monitor CPU usage on multiple instances running your website or app. Each instance is monitored individually, and notifications are sent for each instance.
+
+### Splitting by dimensions in log alert rules
+To monitor for the same condition on multiple Azure resources, you can use splitting by dimensions. Splitting by dimensions allows you to create resource-centric alerts at scale for a subscription or resource group. Alerts are split into separate alerts by grouping combinations using numerical or string columns. Splitting on the Azure resource ID column makes the specified resource into the alert target.
+You may also decide not to split when you want a condition applied to multiple resources in the scope. For example, you might want to fire an alert if at least five machines in the resource group scope have CPU usage over 80%.
+
+### Using the API
+Manage new rules in your workspaces using the [ScheduledQueryRules](/rest/api/monitor/scheduledqueryrule-2021-08-01/scheduled-query-rules) API.
+
+> [!NOTE]
+> Log alerts for Log Analytics used to be managed using the legacy [Log Analytics Alert API](api-alerts.md). Learn more about [switching to the current ScheduledQueryRules API](alerts-log-api-switch.md).
+
+## Log alerts on your Azure bill
+Log Alerts are listed under the resource provider `microsoft.insights/scheduledqueryrules` with:
+- Log Alerts on Application Insights shown with exact resource name along with resource group and alert properties.
+- Log Alerts on Log Analytics shown with exact resource name along with resource group and alert properties, when created using the scheduledQueryRules API.
+- Log alerts created from the [legacy Log Analytics API](./api-alerts.md) aren't tracked as [Azure Resources](../../azure-resource-manager/management/overview.md) and don't have enforced unique resource names.
These alerts are still created on `microsoft.insights/scheduledqueryrules` as hidden resources, which have this resource naming structure `|||`. Log Alerts on legacy API are shown with above hidden resource name along with resource group and alert properties. +> [!Note] +> Unsupported resource characters such as <, >, %, &, \, ?, / are replaced with _ in the hidden resource names and this will also reflect in the billing information. +## Activity log alerts +An activity log alert monitors a resource by checking the activity logs for a new activity log event that matches the defined conditions. + +You may want to use activity log alerts for these types of scenarios: +- When a specific operation occurs on resources in a specific resource group or subscription. For example, you may want to be notified when: + - Any virtual machine in a production resource group is deleted. + - Any new roles are assigned to a user in your subscription. +- A service health event occurs. Service health events include notifications of incidents and maintenance events that apply to resources in your subscription. + +You can create an activity log alert on: +- Any of the activity log [event categories](../essentials/activity-log-schema.md), other than on alert events. +- Any activity log event in top-level property in the JSON object. + +Activity log alert rules are Azure resources, so they can be created by using an Azure Resource Manager template. They also can be created, updated, or deleted in the Azure portal. + +An activity log alert only monitors events in the subscription in which the alert is created. + +## Smart Detection alerts +After setting up Application Insights for your project, when your app generates a certain minimum amount of data, Smart Detection takes 24 hours to learn the normal behavior of your app. Your app's performance has a typical pattern of behavior. Some requests or dependency calls will be more prone to failure than others; and the overall failure rate may go up as load increases. Smart Detection uses machine learning to find these anomalies. Smart Detection monitors the data received from your app, and in particular the failure rates. Application Insights automatically alerts you in near real time if your web app experiences an abnormal rise in the rate of failed requests. + +As data comes into Application Insights from your web app, Smart Detection compares the current behavior with the patterns seen over the past few days. If there is an abnormal rise in failure rate compared to previous performance, an analysis is triggered. To help you triage and diagnose the problem, an analysis of the characteristics of the failures and related application data is provided in the alert details. There are also links to the Application Insights portal for further diagnosis. The feature needs no set-up nor configuration, as it uses machine learning algorithms to predict the normal failure rate. + +While metric alerts tell you there might be a problem, Smart Detection starts the diagnostic work for you, performing much of the analysis you would otherwise have to do yourself. You get the results neatly packaged, helping you to get quickly to the root of the problem. + +Smart detection works for any web app, hosted in the cloud or on your own servers, that generate application request or dependency data. + +## Next steps +- Get an [overview of alerts](alerts-overview.md). +- [Create an alert rule](alerts-log.md). +- Learn more about [Smart Detection](../app/proactive-failure-diagnostics.md). 
diff --git a/articles/azure-monitor/alerts/alerts-unified-log.md b/articles/azure-monitor/alerts/alerts-unified-log.md deleted file mode 100644 index ad364ec35322..000000000000 --- a/articles/azure-monitor/alerts/alerts-unified-log.md +++ /dev/null @@ -1,218 +0,0 @@ ---- -title: Log alerts in Azure Monitor -description: Trigger emails, notifications, call websites URLs (webhooks), or automation when the log query condition you specify is met -author: yanivlavi -ms.author: yalavi -ms.topic: conceptual -ms.date: 2/23/2022 ---- - -# Log alerts in Azure Monitor - -## Overview - -Log alerts are one of the alert types that are supported in [Azure Alerts](./alerts-overview.md). Log alerts allow users to use a [Log Analytics](../logs/log-analytics-tutorial.md) query to evaluate resources logs every set frequency, and fire an alert based on the results. Rules can trigger one or more actions using [Action Groups](./action-groups.md). - -> [!NOTE] -> Log data from a [Log Analytics workspace](../logs/log-analytics-tutorial.md) can be sent to the Azure Monitor metrics store. Metrics alerts have [different behavior](alerts-metric-overview.md), which may be more desirable depending on the data you are working with. For information on what and how you can route logs to metrics, see [Metric Alert for Logs](alerts-metric-logs.md). - -## Prerequisites - -Log alerts run queries on Log Analytics data. First you should start [collecting log data](../essentials/resource-logs.md) and query the log data for issues. You can use the [alert query examples topic](../logs/queries.md) in Log Analytics to understand what you can discover or [get started on writing your own query](../logs/log-analytics-tutorial.md). - -[Azure Monitoring Contributor](../roles-permissions-security.md) is a common role that is needed for creating, modifying, and updating log alerts. Access & query execution rights for the resource logs are also needed. Partial access to resource logs can fail queries or return partial results. [Learn more about configuring log alerts in Azure](./alerts-log.md). - -> [!NOTE] -> Log alerts for Log Analytics used to be managed using the legacy [Log Analytics Alert API](./api-alerts.md). [Learn more about switching to the current ScheduledQueryRules API](../alerts/alerts-log-api-switch.md). - -## Query evaluation definition - -Log search rules condition definition starts from: - -- What query to run? -- How to use the results? - -The following sections describe the different parameters you can use to set the above logic. - -### Log query -The [Log Analytics](../logs/log-analytics-tutorial.md) query used to evaluate the rule. The results returned by this query are used to determine whether an alert is to be triggered. The query can be scoped to: - -- A specific resource, such as a virtual machine. -- An at scale resource, such as a subscription or resource group. -- Multiple resources using [cross-resource query](../logs/cross-workspace-query.md#querying-across-log-analytics-workspaces-and-from-application-insights). - -> [!IMPORTANT] -> Alert queries have constraints to ensure optimal performance and the relevance of the results. [Learn more here](./alerts-log-query.md). - -> [!IMPORTANT] -> Resource centric and [cross-resource query](../logs/cross-workspace-query.md#querying-across-log-analytics-workspaces-and-from-application-insights) are only supported using the current scheduledQueryRules API. If you use the legacy [Log Analytics Alert API](./api-alerts.md), you will need to switch. 
[Learn more about switching](./alerts-log-api-switch.md) - -#### Query time Range - -Time range is set in the rule condition definition. It's called **Override query time range** in the advance settings section. - -Unlike log analytics, the time range in alerts is limited to a maximum of two days of data. Even if longer range **ago** command is used in the query, the time range will apply. For example, a query scans up to 2 days, even if the text contains **ago(7d)**. - -If you use **ago** command in the query, the range is automatically set to two days. You can also change time range manually in cases the query requires more data than the alert evaluation even if there is no **ago** command in the query. - -### Measure - -Log alerts turn log into numeric values that can be evaluated. You can measure two different things: -* Result count -* Calculation of a value - -#### Result count - -Count of results is the default measure and is used when you set a **Measure** with a selection of **Table rows**. Ideal for working with events such as Windows event logs, syslog, application exceptions. Triggers when log records happen or doesn't happen in the evaluated time window. - -Log alerts work best when you try to detect data in the log. It works less well when you try to detect lack of data in the logs. For example, alerting on virtual machine heartbeat. - -> [!NOTE] -> Since logs are semi-structured data, they are inherently more latent than metric, you may experience misfires when trying to detect lack of data in the logs, and you should consider using [metric alerts](alerts-metric-overview.md). You can send data to the metric store from logs using [metric alerts for logs](alerts-metric-logs.md). - -##### Example of result count use case - -You want to know when your application responded with error code 500 (Internal Server Error). You would create an alert rule with the following details: - -- **Query:** - -```Kusto -requests -| where resultCode == "500" -``` - -- **Aggregation granularity:** 15 minutes -- **Alert frequency:** 15 minutes -- **Threshold value:** Greater than 0 - -Then alert rules monitors for any requests ending with 500 error code. The query runs every 15 minutes, over the last 15 minutes. If even one record is found, it fires the alert and triggers the actions configured. - -### Calculation of a value - -Calculation of a value is used when you select a column name of a numeric column for the **Measure**, and the result is a calculation that you perform on the values in that column. This would be used, for example, as CPU counter value. -### Aggregation type - -The calculation that is done on multiple records to aggregate them to one numeric value using the [**Aggregation granularity**](#aggregation-granularity) defined. For example: -- **Sum** returns the sum of measure column. -- **Average** returns the average of the measure column. - -### Aggregation granularity - -Determines the interval that is used to aggregate multiple records to one numeric value. For example, if you specified **5 minutes**, records would be grouped by 5-minute intervals using the **Aggregation type** specified. - -> [!NOTE] -> As [bin()](/azure/kusto/query/binfunction) can result in uneven time intervals, the alert service will automatically convert [bin()](/azure/kusto/query/binfunction) function to [bin_at()](/azure/kusto/query/binatfunction) function with appropriate time at runtime, to ensure results with a fixed point. 
- -### Split by alert dimensions - -Split alerts by number or string columns into separate alerts by grouping into unique combinations. It's configured in **Split by dimensions** section of the condition (limited to six splits). When creating resource-centric alerts at scale (subscription or resource group scope), you can split by Azure resource ID column. Splitting on Azure resource ID column will change the target of the alert to the specified resource. - -Splitting by Azure resource ID column is recommended when you want to monitor the same condition on multiple Azure resources. For example, monitoring all virtual machines for CPU usage over 80%. You may also decide not to split when you want a condition on multiple resources in the scope. Such as monitoring that at least five machines in the resource group scope have CPU usage over 80%. -#### Example of splitting by alert dimensions - -For example, you want to monitor errors for multiple virtual machines running your web site/app in a specific resource group. You can do that using a log alert rule as follows: - -- **Query:** - - ```Kusto - // Reported errors - union Event, Syslog // Event table stores Windows event records, Syslog stores Linux records - | where EventLevelName == "Error" // EventLevelName is used in the Event (Windows) records - or SeverityLevel== "err" // SeverityLevel is used in Syslog (Linux) records - ``` - -- **Resource ID Column:** _ResourceId -- **Dimensions:** - - Computer = VM1, VM2 (Filtering values in alert rules definition isn't available currently for workspaces and Application Insights. Filter in the query text.) -- **Aggregation granularity:** 15 minutes -- **Alert frequency:** 15 minutes -- **Threshold value:** Greater than 0 - -This rule monitors if any virtual machine had error events in the last 15 minutes. Each virtual machine is monitored separately and will trigger actions individually. - -> [!NOTE] -> Split by alert dimensions is only available for the current scheduledQueryRules API. If you use the legacy [Log Analytics Alert API](./api-alerts.md), you will need to switch. [Learn more about switching](./alerts-log-api-switch.md). Resource centric alerting at scale is only supported in the API version `2021-08-01` and above. - -## Alert logic definition - -Once you define the query to run and evaluation of the results, you need to define the alerting logic and when to fire actions. The following sections describe the different parameters you can use: - -### Threshold and operator - -The query results are transformed into a number that is compared against the threshold and operator. - -### Frequency - -The interval in which the query is run. Can be set from a minute to a day. - -### Number of violations to trigger alert - -You can specify the alert evaluation period and the number of failures needed to trigger an alert. Allowing you to better define an impact time to trigger an alert. - -For example, if your rule [**Aggregation granularity**](#aggregation-granularity) is defined as '5 minutes', you can trigger an alert only if three failures (15 minutes) of the last hour occurred. This setting is defined by your application business policy. - -## State and resolving alerts - -Log alerts can either be stateless or stateful (currently in preview). - -Stateless alerts fire each time the condition is met, even if fired previously. You can [mark the alert as closed](../alerts/alerts-managing-alert-states.md) once the alert instance is resolved. 
You can also mute actions to prevent them from triggering for a period after an alert rule fired using the **Mute Actions** option in the alert details section. - -See this alert stateless evaluation example: - -| Time | Log condition evaluation | Result -| ------- | ----------| ----------| ------- -| 00:05 | FALSE | Alert doesn't fire. No actions called. -| 00:10 | TRUE | Alert fires and action groups called. New alert state ACTIVE. -| 00:15 | TRUE | Alert fires and action groups called. New alert state ACTIVE. -| 00:20 | FALSE | Alert doesn't fire. No actions called. Pervious alerts state remains ACTIVE. - -Stateful alerts fire once per incident and resolve. The alert rule resolves when the alert condition isn't met for 30 minutes for a specific evaluation period (to account for [log ingestion delay](../alerts/alerts-troubleshoot-log.md#data-ingestion-time-for-logs)), and for three consecutive evaluations to reduce noise if there is flapping conditions. For example, with a frequency of 5 minutes, the alert resolve after 40 minutes or with a frequency of 1 minute, the alert resolve after 32 minutes. The resolved notification is sent out via web-hooks or email, the status of the alert instance (called monitor state) in Azure portal is also set to resolved. - -Stateful alerts feature is currently in preview. You can set this using **Automatically resolve alerts** in the alert details section. - -## Location selection in log alerts - -Log alerts allow you to set a location for alert rules. You can select any of the supported locations, which align to [Log Analytics supported region list](https://azure.microsoft.com/global-infrastructure/services/?products=monitor). - -Location affects which region the alert rule is evaluated in. Queries are executed on the log data in the selected region, that said, the alert service end to end is global. Meaning alert rule definition, fired alerts, notifications, and actions aren't bound to the location in the alert rule. Data is transfer from the set region since the Azure Monitor alerts service is a [non-regional service](https://azure.microsoft.com/global-infrastructure/services/?products=monitor®ions=non-regional). - -## Pricing model - -Each Log Alert rule is billed based the interval at which the log query is evaluated (more frequent query evaluation results in a higher cost). Additionally, for Log Alerts configured for [at scale monitoring](#split-by-alert-dimensions), the cost will also depend on the number of time series created by the dimensions resulting from your query. - -Prices for Log Alert rules are available on the [Azure Monitor pricing page](https://azure.microsoft.com/pricing/details/monitor/). - -### Calculating the price for a Log Alert rule without dimensions - -The price of an alert rule which queries 1 resource event every 15-minutes can be calculated as: - -Total monthly price = 1 resource * 1 log alert rule * price per 15-minute internal log alert rule per month. - -### Calculating the price for a Log Alert rule with dimensions - -The price of an alert rule which monitors 10 VM resources at 1-minute frequency, using resource centric log monitoring, can be calculated as Price of alert rule + Price of number of dimensions. For example: - -Total monthly price = price per 1-minute log alert rule per month + ( 10 time series - 1 included free time series ) * price per 1-min interval monitored per month. - -Pricing of at scale log monitoring is applicable from Scheduled Query Rules API version 2021-02-01. 
- -## View log alerts usage on your Azure bill - -Log Alerts are listed under resource provider `microsoft.insights/scheduledqueryrules` with: - -- Log Alerts on Application Insights shown with exact resource name along with resource group and alert properties. -- Log Alerts on Log Analytics shown with exact resource name along with resource group and alert properties; when created using [scheduledQueryRules API](/rest/api/monitor/scheduledqueryrule-2021-08-01/scheduled-query-rules). -- Log alerts created from [legacy Log Analytics API](./api-alerts.md) aren't tracked [Azure Resources](../../azure-resource-manager/management/overview.md) and don't have enforced unique resource names. These alerts are still created on `microsoft.insights/scheduledqueryrules` as hidden resources, which have this resource naming structure `|||`. Log Alerts on legacy API are shown with above hidden resource name along with resource group and alert properties. - -> [!NOTE] -> Unsupported resource characters such as `<, >, %, &, \, ?, /` are replaced with `_` in the hidden resource names and this will also reflect in the billing information. - -> [!NOTE] -> Log alerts for Log Analytics used to be managed using the legacy [Log Analytics Alert API](./api-alerts.md) and legacy templates of [Log Analytics saved searches and alerts](../insights/solutions.md). [Learn more about switching to the current ScheduledQueryRules API](../alerts/alerts-log-api-switch.md). Any alert rule management should be done using [legacy Log Analytics API](./api-alerts.md) until you decide to switch and you can't use the hidden resources. - -## Next steps - -* Learn about [creating in log alerts in Azure](./alerts-log.md). -* Understand [webhooks in log alerts in Azure](../alerts/alerts-log-webhook.md). -* Learn about [Azure Alerts](./alerts-overview.md). -* Learn more about [Log Analytics](../logs/log-query-overview.md). 
diff --git a/articles/azure-monitor/alerts/media/alerts-log/alerts-create-log-rule-dimensions.png b/articles/azure-monitor/alerts/media/alerts-log/alerts-create-log-rule-dimensions.png new file mode 100644 index 000000000000..53fc9a8288c1 Binary files /dev/null and b/articles/azure-monitor/alerts/media/alerts-log/alerts-create-log-rule-dimensions.png differ diff --git a/articles/azure-monitor/alerts/media/alerts-log/alerts-create-log-rule-logic.png b/articles/azure-monitor/alerts/media/alerts-log/alerts-create-log-rule-logic.png new file mode 100644 index 000000000000..35e1abdd9535 Binary files /dev/null and b/articles/azure-monitor/alerts/media/alerts-log/alerts-create-log-rule-logic.png differ diff --git a/articles/azure-monitor/alerts/media/alerts-log/alerts-create-new-alert-rule-expanded.png b/articles/azure-monitor/alerts/media/alerts-log/alerts-create-new-alert-rule-expanded.png index 31fc297f07f5..95217dc8a334 100644 Binary files a/articles/azure-monitor/alerts/media/alerts-log/alerts-create-new-alert-rule-expanded.png and b/articles/azure-monitor/alerts/media/alerts-log/alerts-create-new-alert-rule-expanded.png differ diff --git a/articles/azure-monitor/alerts/media/alerts-managing-alert-instances/alerts-monitor-menu.png b/articles/azure-monitor/alerts/media/alerts-managing-alert-instances/alerts-monitor-menu.png new file mode 100644 index 000000000000..a188f6f8bfc4 Binary files /dev/null and b/articles/azure-monitor/alerts/media/alerts-managing-alert-instances/alerts-monitor-menu.png differ diff --git a/articles/azure-monitor/alerts/media/alerts-managing-alert-instances/alerts-resource-menu.png b/articles/azure-monitor/alerts/media/alerts-managing-alert-instances/alerts-resource-menu.png new file mode 100644 index 000000000000..53585e8aa47e Binary files /dev/null and b/articles/azure-monitor/alerts/media/alerts-managing-alert-instances/alerts-resource-menu.png differ diff --git a/articles/azure-monitor/alerts/media/alerts-managing-alert-instances/enable-recommended-alert-rules.jpg b/articles/azure-monitor/alerts/media/alerts-managing-alert-instances/enable-recommended-alert-rules.jpg index af87e87e78d5..a530db5c2d50 100644 Binary files a/articles/azure-monitor/alerts/media/alerts-managing-alert-instances/enable-recommended-alert-rules.jpg and b/articles/azure-monitor/alerts/media/alerts-managing-alert-instances/enable-recommended-alert-rules.jpg differ diff --git a/articles/azure-monitor/alerts/media/alerts-overview/alerts-flow.png b/articles/azure-monitor/alerts/media/alerts-overview/alerts-flow.png new file mode 100644 index 000000000000..160d3032da89 Binary files /dev/null and b/articles/azure-monitor/alerts/media/alerts-overview/alerts-flow.png differ diff --git a/articles/azure-monitor/app/app-map.md b/articles/azure-monitor/app/app-map.md index 9547c8becf2e..300bb70072b9 100644 --- a/articles/azure-monitor/app/app-map.md +++ b/articles/azure-monitor/app/app-map.md @@ -325,7 +325,10 @@ Adjust sensitivity to achieve the desired confidence level in highlighted edges. ### Limitations of Intelligent View -The Intelligent View works well for large distributed applications but sometimes it can take around one minute to load. +* Large distributed applications may take a minute to load Intelligent View. +* Timeframes of up to seven days are supported. + +We would love to hear your feedback. 
([Portal feedback](#portal-feedback)) ## Troubleshooting @@ -379,7 +382,10 @@ In a case where an edge is highlighted the explanation from the model should poi #### Intelligent View doesn't load -If Intelligent View doesn't load, ensure that you've opted into the preview on Application Map. +Follow these steps if Intelligent View doesn't load. + +1. Set the configured time frame to six days or less. +1. The `Try preview` button must be selected to opt in. :::image type="content" source="media/app-map/intelligent-view-try-preview.png" alt-text="Screenshot of the Application Map user interface preview opt-in button." lightbox="media/app-map/intelligent-view-try-preview.png"::: diff --git a/articles/azure-monitor/app/azure-web-apps-java.md b/articles/azure-monitor/app/azure-web-apps-java.md index 2f4c163c6134..c6f5f4fe81da 100644 --- a/articles/azure-monitor/app/azure-web-apps-java.md +++ b/articles/azure-monitor/app/azure-web-apps-java.md @@ -19,9 +19,7 @@ You can apply additional configurations, and then based on your specific scenari ### Auto-instrumentation through Azure portal -You can turn on monitoring for your Java apps running in Azure App Service just with one click, no code change required. -Application Insights for Java is integrated with Azure App Service on Linux - both code-based and custom containers, and with App Service on Windows for code-based apps. -The integration adds [Application Insights Java 3.x](./java-in-process-agent.md) and you will get the telemetry auto-collected. +You can turn on monitoring for your Java apps running in Azure App Service just with one click, no code change required. The integration adds [Application Insights Java 3.x](./java-in-process-agent.md) and you will get the telemetry auto-collected. 1. **Select Application Insights** in the Azure control panel for your app service, then select **Enable**. @@ -102,4 +100,4 @@ For the latest updates and bug fixes, [consult the release notes](web-app-extens * [Monitor service health metrics](../data-platform.md) to make sure your service is available and responsive. * [Receive alert notifications](../alerts/alerts-overview.md) whenever operational events happen or metrics cross a threshold. * Use [Application Insights for JavaScript apps and web pages](javascript.md) to get client telemetry from the browsers that visit a web page. -* [Set up Availability web tests](monitor-web-app-availability.md) to be alerted if your site is down. \ No newline at end of file +* [Set up Availability web tests](monitor-web-app-availability.md) to be alerted if your site is down. 
diff --git a/articles/azure-monitor/app/codeless-overview.md b/articles/azure-monitor/app/codeless-overview.md index 65868c2b155d..98b431f0cb3c 100644 --- a/articles/azure-monitor/app/codeless-overview.md +++ b/articles/azure-monitor/app/codeless-overview.md @@ -21,7 +21,8 @@ As we're adding new integrations, the auto-instrumentation capability matrix bec |Environment/Resource Provider | .NET | .NET Core | Java | Node.js | Python | |---------------------------------------|-----------------|-----------------|-----------------|-----------------|-----------------| -|Azure App Service on Windows | GA, OnBD* | GA, opt-in | Public Preview, Container and Custom Containers are GA | Public Preview | Not supported | +|Azure App Service on Windows - Publish as Code | GA, OnBD* | GA | GA | GA, OnBD* | Not supported | +|Azure App Service on Windows - Publish as Docker | Public Preview | Public Preview | Public Preview | Not supported | Not supported | |Azure App Service on Linux | N/A | Public Preview | GA | GA | Not supported | |Azure Functions - basic | GA, OnBD* | GA, OnBD* | GA, OnBD* | GA, OnBD* | GA, OnBD* | |Azure Functions - dependencies | Not supported | Not supported | Public Preview | Not supported | Through [extension](monitor-functions.md#distributed-tracing-for-python-function-apps) | diff --git a/articles/azure-monitor/app/convert-classic-resource.md b/articles/azure-monitor/app/convert-classic-resource.md index 72d0d8cb1ebf..806f07bd3221 100644 --- a/articles/azure-monitor/app/convert-classic-resource.md +++ b/articles/azure-monitor/app/convert-classic-resource.md @@ -40,7 +40,7 @@ If you don't need to migrate an existing resource, and instead want to create a - A Log Analytics workspace with the access control mode set to the **`use resource or workspace permissions`** setting. - - Workspace-based Application Insights resources aren't compatible with workspaces set to the dedicated **`workspace based permissions`** setting. To learn more about Log Analytics workspace access control, consult the [Log Analytics configure access control mode guidance](../logs/manage-access.md#configure-access-control-mode) + - Workspace-based Application Insights resources aren't compatible with workspaces set to the dedicated **`workspace based permissions`** setting. To learn more about Log Analytics workspace access control, consult the [access control mode guidance](../logs/manage-access.md#access-control-mode) - If you don't already have an existing Log Analytics Workspace, [consult the Log Analytics workspace creation documentation](../logs/quick-create-workspace.md). @@ -224,7 +224,7 @@ From within the Application Insights resource pane, select **Properties** > **Ch **Error message:** *The selected workspace is configured with workspace-based access mode. Some APM features may be impacted. Select another workspace or allow resource-based access in the workspace settings. You can override this error by using CLI.* -In order for your workspace-based Application Insights resource to operate properly you need to change the access control mode of your target Log Analytics workspace to the **resource or workspace permissions** setting. This setting is located in the Log Analytics workspace UI under **Properties** > **Access control mode**. For detailed instructions, consult the [Log Analytics configure access control mode guidance](../logs/manage-access.md#configure-access-control-mode). 
If your access control mode is set to the exclusive **Require workspace permissions** setting, migration via the portal migration experience will remain blocked. +In order for your workspace-based Application Insights resource to operate properly you need to change the access control mode of your target Log Analytics workspace to the **resource or workspace permissions** setting. This setting is located in the Log Analytics workspace UI under **Properties** > **Access control mode**. For detailed instructions, consult the [Log Analytics configure access control mode guidance](../logs/manage-access.md#access-control-mode). If your access control mode is set to the exclusive **Require workspace permissions** setting, migration via the portal migration experience will remain blocked. If you can’t change the access control mode for security reasons for your current target workspace, we recommend creating a new Log Analytics workspace to use for the migration. diff --git a/articles/azure-monitor/app/data-model-pageview-telemetry.md b/articles/azure-monitor/app/data-model-pageview-telemetry.md index 41ccaf018571..587ae04d7f39 100644 --- a/articles/azure-monitor/app/data-model-pageview-telemetry.md +++ b/articles/azure-monitor/app/data-model-pageview-telemetry.md @@ -8,10 +8,10 @@ ms.reviewer: vgorbenko # PageView telemetry: Application Insights data model -PageView telemetry (in [Application Insights](./app-insights-overview.md)) is logged when an application user opens a new page of a monitored application. The `Page` in this context is a logical unit that is defined by the developer to be an application tab or a screen and is not necessarily correlated to a browser webpage load or refresh action. This distinction can be further understood in the context of single-page applications (SPA) where the switch between pages is not tied to browser page actions. [`pageViews.duration`](https://docs.microsoft.com/azure/azure-monitor/reference/tables/pageviews) is the time it takes for the application to present the page to the user. +PageView telemetry (in [Application Insights](./app-insights-overview.md)) is logged when an application user opens a new page of a monitored application. The `Page` in this context is a logical unit that is defined by the developer to be an application tab or a screen and is not necessarily correlated to a browser webpage load or refresh action. This distinction can be further understood in the context of single-page applications (SPA) where the switch between pages is not tied to browser page actions. [`pageViews.duration`](/azure/azure-monitor/reference/tables/pageviews) is the time it takes for the application to present the page to the user. > [!NOTE] -> By default, Application Insights SDKs log single PageView events on each browser webpage load action, with [`pageViews.duration`](https://docs.microsoft.com/azure/azure-monitor/reference/tables/pageviews) populated by [browser timing](#measuring-browsertiming-in-application-insights). Developers can extend additional tracking of PageView events by using the [trackPageView API call](./api-custom-events-metrics.md#page-views). +> By default, Application Insights SDKs log single PageView events on each browser webpage load action, with [`pageViews.duration`](/azure/azure-monitor/reference/tables/pageviews) populated by [browser timing](#measuring-browsertiming-in-application-insights). 
Developers can extend additional tracking of PageView events by using the [trackPageView API call](./api-custom-events-metrics.md#page-views). ## Measuring browserTiming in Application Insights @@ -34,4 +34,4 @@ Modern browsers expose measurements for page load actions with the [Performance * If it’s not, then the *deprecated* [`PerformanceTiming`](https://developer.mozilla.org/en-US/docs/Web/API/PerformanceTiming) interface is used and the delta between [`NavigationStart`](https://developer.mozilla.org/en-US/docs/Web/API/PerformanceTiming/navigationStart) and [`LoadEventEnd`](https://developer.mozilla.org/en-US/docs/Web/API/PerformanceTiming/loadEventEnd) is calculated. * The developer specifies a duration value when logging custom PageView events using the [trackPageView API call](./api-custom-events-metrics.md#page-views). -![Screenshot of the Metrics page in Application Insights showing graphic displays of metrics data for a web application.](./media/javascript/page-view-load-time.png) +![Screenshot of the Metrics page in Application Insights showing graphic displays of metrics data for a web application.](./media/javascript/page-view-load-time.png) \ No newline at end of file diff --git a/articles/azure-monitor/app/proactive-performance-diagnostics.md b/articles/azure-monitor/app/proactive-performance-diagnostics.md index 5c5729bb1bcb..20a0fd8d961c 100644 --- a/articles/azure-monitor/app/proactive-performance-diagnostics.md +++ b/articles/azure-monitor/app/proactive-performance-diagnostics.md @@ -8,7 +8,7 @@ ms.date: 05/04/2017 # Smart detection - Performance Anomalies >[!NOTE] ->You can migrate your Application Insight resources to alerts-bases smart detection (preview). The migration creates alert rules for the different smart detection modules. Once created, you can manage and configure these rules just like any other Azure Monitor alert rules. You can also configure action groups for these rules, thus enabling multiple methods of taking actions or triggering notification on new detections. +>You can migrate your Application Insight resources to alerts-based smart detection (preview). The migration creates alert rules for the different smart detection modules. Once created, you can manage and configure these rules just like any other Azure Monitor alert rules. You can also configure action groups for these rules, thus enabling multiple methods of taking actions or triggering notification on new detections. > > For more information on the migration process, see [Smart Detection Alerts migration](../alerts/alerts-smart-detections-migration.md). diff --git a/articles/azure-monitor/autoscale/autoscale-best-practices.md b/articles/azure-monitor/autoscale/autoscale-best-practices.md index 9578797f1163..64a1aba2f885 100644 --- a/articles/azure-monitor/autoscale/autoscale-best-practices.md +++ b/articles/azure-monitor/autoscale/autoscale-best-practices.md @@ -150,6 +150,14 @@ You can also use an Activity Log alert to monitor the health of the autoscale en In addition to using activity log alerts, you can also configure email or webhook notifications to get notified for scale actions via the notifications tab on the autoscale setting. +## Send data securely using TLS 1.2 +To ensure the security of data in transit to Azure Monitor, we strongly encourage you to configure the agent to use at least Transport Layer Security (TLS) 1.2. 
Older versions of TLS/Secure Sockets Layer (SSL) have been found to be vulnerable, and while they still currently work to allow backwards compatibility, they are **not recommended**, and the industry is quickly moving to abandon support for these older protocols. + +The [PCI Security Standards Council](https://www.pcisecuritystandards.org/) has set a deadline of [June 30th, 2018](https://www.pcisecuritystandards.org/pdfs/PCI_SSC_Migrating_from_SSL_and_Early_TLS_Resource_Guide.pdf) to disable older versions of TLS/SSL and upgrade to more secure protocols. Once Azure drops legacy support, if your agents cannot communicate over at least TLS 1.2, you will not be able to send data to Azure Monitor Logs. + +We recommend you do NOT explicitly set your agent to only use TLS 1.2 unless absolutely necessary. Allowing the agent to automatically detect, negotiate, and take advantage of future security standards is preferable. Otherwise, you may miss the added security of the newer standards and possibly experience problems if TLS 1.2 is ever deprecated in favor of those newer standards. + + ## Next Steps - [Create an Activity Log Alert to monitor all autoscale engine operations on your subscription.](https://github.com/Azure/azure-quickstart-templates/tree/master/demos/monitor-autoscale-alert) - [Create an Activity Log Alert to monitor all failed autoscale scale in/scale out operations on your subscription](https://github.com/Azure/azure-quickstart-templates/tree/master/demos/monitor-autoscale-failed-alert) diff --git a/articles/azure-monitor/azure-monitor-monitoring-reference.md b/articles/azure-monitor/azure-monitor-monitoring-reference.md index 32a87709db21..56c7e71199d0 100644 --- a/articles/azure-monitor/azure-monitor-monitoring-reference.md +++ b/articles/azure-monitor/azure-monitor-monitoring-reference.md @@ -474,4 +474,4 @@ The following schemas are relevant to action groups, which are part of the notif ## See Also - See [Monitoring Azure Azure Monitor](monitor-azure-monitor.md) for a description of what Azure Monitor monitors in itself. -- See [Monitoring Azure resources with Azure Monitor](/azure/azure-monitor/essentials/monitor-azure-resource) for details on monitoring Azure resources. +- See [Monitoring Azure resources with Azure Monitor](./essentials/monitor-azure-resource.md) for details on monitoring Azure resources. \ No newline at end of file diff --git a/articles/azure-monitor/best-practices-data-collection.md b/articles/azure-monitor/best-practices-data-collection.md index cf31da929641..4c4592a85a68 100644 --- a/articles/azure-monitor/best-practices-data-collection.md +++ b/articles/azure-monitor/best-practices-data-collection.md @@ -14,12 +14,15 @@ This article is part of the scenario [Recommendations for configuring Azure Moni > [!IMPORTANT] > The features of Azure Monitor and their configuration will vary depending on your business requirements balanced with the cost of the enabled features. Each step below will identify whether there is potential cost, and you should assess these costs before proceeding. See [Azure Monitor pricing](https://azure.microsoft.com/pricing/details/monitor/) for complete pricing details. -## Create Log Analytics workspace -You require at least one Log Analytics workspace to enable [Azure Monitor Logs](logs/data-platform-logs.md), which is required for collecting such data as logs from Azure resources, collecting data from the guest operating system of Azure virtual machines, and for most Azure Monitor insights.
Other services such as Microsoft Sentinel and Microsoft Defender for Cloud also use a Log Analytics workspace and can share the same one that you use for Azure Monitor. You can start with a single workspace to support this monitoring, but see [Designing your Azure Monitor Logs deployment](logs/design-logs-deployment.md) for guidance on when to use multiple workspaces. +## Design Log Analytics workspace architecture +You require at least one Log Analytics workspace to enable [Azure Monitor Logs](logs/data-platform-logs.md), which is required for collecting such data as logs from Azure resources, collecting data from the guest operating system of Azure virtual machines, and for most Azure Monitor insights. Other services such as Microsoft Sentinel and Microsoft Defender for Cloud also use a Log Analytics workspace and can share the same one that you use for Azure Monitor. -There is no cost for creating a Log Analytics workspace, but there is a potential charge once you configure data to be collected into it. See [Azure Monitor Logs pricing details](logs/cost-logs.md) for details. +There is no cost for creating a Log Analytics workspace, but there is a potential charge once you configure data to be collected into it. See [Azure Monitor Logs pricing details](logs/cost-logs.md) for details on how log data is charged. + +See [Create a Log Analytics workspace in the Azure portal](logs/quick-create-workspace.md) to create an initial Log Analytics workspace and [Manage access to Log Analytics workspaces](logs/manage-access.md) to configure access. You can use scalable methods such as Resource Manager templates to configure workspaces, though this is often not required since most environments will require a minimal number. + +Start with a single workspace to support initial monitoring, but see [Design a Log Analytics workspace configuration](logs/workspace-design.md) for guidance on when to use multiple workspaces and how to locate and configure them. -See [Create a Log Analytics workspace in the Azure portal](logs/quick-create-workspace.md) to create an initial Log Analytics workspace. See [Manage access to log data and workspaces in Azure Monitor](logs/manage-access.md) to configure access. You can use scalable methods such as Resource Manager templates to configure workspaces though, this is often not required since most environments will require a minimal number. ## Collect data from Azure resources Some monitoring of Azure resources is available automatically with no configuration required, while you must perform configuration steps to collect additional monitoring data. The following table illustrates the configuration steps required to collect all available data from your Azure resources, including at which step data is sent to Azure Monitor Metrics and Azure Monitor Logs. The sections below describe each step in further detail. 
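Because each data source you enable adds billable volume to the workspace, it can help to check what each table contributes once collection is configured. The following is a minimal Kusto sketch you can run in Log Analytics against the standard `Usage` table; the 30-day window and the MB-to-GB conversion are illustrative choices, not requirements:

```kusto
// Billable data volume ingested per data type over the last 30 days.
// Usage reports Quantity in MB, so divide by 1,000 to approximate GB.
Usage
| where TimeGenerated > ago(30d)
| where IsBillable == true
| summarize BillableDataGB = sum(Quantity) / 1000 by DataType
| sort by BillableDataGB desc
```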
diff --git a/articles/azure-monitor/containers/container-insights-enable-arc-enabled-clusters.md b/articles/azure-monitor/containers/container-insights-enable-arc-enabled-clusters.md index 999dda26af97..eaecf1f747cc 100644 --- a/articles/azure-monitor/containers/container-insights-enable-arc-enabled-clusters.md +++ b/articles/azure-monitor/containers/container-insights-enable-arc-enabled-clusters.md @@ -4,7 +4,7 @@ ms.date: 05/24/2022 ms.topic: article author: shashankbarsin ms.author: shasb -description: "Collect metrics and logs of Azure Arc-enabled Kubernetes clusters using Azure Monitor" +description: Collect metrics and logs of Azure Arc-enabled Kubernetes clusters using Azure Monitor. --- # Azure Monitor Container Insights for Azure Arc-enabled Kubernetes clusters diff --git a/articles/azure-monitor/containers/container-insights-hybrid-setup.md b/articles/azure-monitor/containers/container-insights-hybrid-setup.md index 58e28cf8a18e..5321e7498f7c 100644 --- a/articles/azure-monitor/containers/container-insights-hybrid-setup.md +++ b/articles/azure-monitor/containers/container-insights-hybrid-setup.md @@ -39,7 +39,7 @@ Before you start, make sure that you have the following: - You are a member of the **Log Analytics contributor role** to enable container monitoring. For more information about how to control access to a Log Analytics workspace, see [Manage access to workspace and log data](../logs/manage-access.md). -- To view the monitoring data, you need to have [*Log Analytics reader*](../logs/manage-access.md#manage-access-using-azure-permissions) role in the Log Analytics workspace, configured with Container insights. +- To view the monitoring data, you need to have [*Log Analytics reader*](../logs/manage-access.md#azure-rbac) role in the Log Analytics workspace, configured with Container insights. - [HELM client](https://helm.sh/docs/using_helm/) to onboard the Container insights chart for the specified Kubernetes cluster. diff --git a/articles/azure-monitor/containers/container-insights-logging-v2.md b/articles/azure-monitor/containers/container-insights-logging-v2.md index 23cfeea517c2..2624c9bffcd7 100644 --- a/articles/azure-monitor/containers/container-insights-logging-v2.md +++ b/articles/azure-monitor/containers/container-insights-logging-v2.md @@ -11,10 +11,10 @@ ms.date: 05/11/2022 --- # Enable ContainerLogV2 schema (preview) -Azure Monitor Container Insights is now in Public Preview of new schema for container logs called ContainerLogV2. As part of this schema, there new fields to make common queries to view AKS (Azure Kubernetes Service) and Azure Arc enabled Kubernetes data. In addition, this schema is compatible as a part of [Basic Logs](../logs/basic-logs-configure.md), which offer a low cost alternative to standard analytics logs. +Azure Monitor Container Insights is now in Public Preview of a new schema for container logs called ContainerLogV2. As part of this schema, there are new fields to make common queries to view AKS (Azure Kubernetes Service) and Azure Arc enabled Kubernetes data. In addition, this schema is compatible as a part of [Basic Logs](../logs/basic-logs-configure.md), which offer a low cost alternative to standard analytics logs.
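Once the schema is enabled (the configuration steps are described below), a quick way to confirm that data is flowing into the new table is a minimal Kusto spot-check; the `ContainerLogV2` table name comes from this article and the one-hour window is arbitrary:

```kusto
// Spot-check that records are arriving in the new table after the schema is enabled.
ContainerLogV2
| where TimeGenerated > ago(1h)
| take 10
```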
> [!NOTE] -> The ContainerLogv2 schema is currently a preview feature, some features may be limited in the Portal experience from Container Insights +> The ContainerLogv2 schema is currently a preview feature. Container Insights does not yet support the "View in Analytics" option; however, the data is still available when queried directly from the [Log Analytics](./container-insights-log-query.md) interface. >[!NOTE] >The new fields are: @@ -39,28 +39,29 @@ Azure Monitor Container Insights is now in Public Preview of new schema for cont 3. Follow the instructions accordingly when configuring an existing ConfigMap or using a new one. ### Configuring an existing ConfigMap -When configuring an existing ConfigMap, we have to append the following section in your existing ConfigMap yaml file: +If your ConfigMap doesn't yet have the "[log_collection_settings.schema]" field, you'll need to append the following section to your existing ConfigMap yaml file: ```yaml [log_collection_settings.schema] - # In the absense of this configmap, default value for containerlog_schema_version is "v1" + # In the absence of this configmap, default value for containerlog_schema_version is "v1" # Supported values for this setting are "v1","v2" - # See documentation for benefits of v2 schema over v1 schema before opting for "v2" schema + # See documentation at https://aka.ms/ContainerLogv2 for benefits of v2 schema over v1 schema before opting for "v2" schema containerlog_schema_version = "v2" ``` ### Configuring a new ConfigMap -1. Download the new ConfigMap from [here](https://aka.ms/container-azm-ms-agentconfig). For new downloaded configmapdefault the value for containerlog_schema_version is "v1" +1. Download the new ConfigMap from [here](https://aka.ms/container-azm-ms-agentconfig). For the newly downloaded configmap, the default value for containerlog_schema_version is "v1" 1. Update the "containerlog_schema_version = "v2"" - ```yaml - [log_collection_settings.schema] - # In the absense of this configmap, default value for containerlog_schema_version is "v1" - # Supported values for this setting are "v1","v2" - # See documentation for benefits of v2 schema over v1 schema before opting for "v2" schema - containerlog_schema_version = "v2" - ``` -1. Once you have finished configuring the configmap Run the following kubectl command: kubectl apply -f `` +```yaml +[log_collection_settings.schema] + # In the absence of this configmap, default value for containerlog_schema_version is "v1" + # Supported values for this setting are "v1","v2" + # See documentation at https://aka.ms/ContainerLogv2 for benefits of v2 schema over v1 schema before opting for "v2" schema + containerlog_schema_version = "v2" +``` + +1. Once you have finished configuring the configmap, run the following kubectl command: kubectl apply -f `` >[!TIP] >Example: kubectl apply -f container-azm-ms-agentconfig.yaml. diff --git a/articles/azure-monitor/containers/container-insights-onboard.md b/articles/azure-monitor/containers/container-insights-onboard.md index e079bcf83ee9..5b8824e960ec 100644 --- a/articles/azure-monitor/containers/container-insights-onboard.md +++ b/articles/azure-monitor/containers/container-insights-onboard.md @@ -7,10 +7,10 @@ ms.date: 05/24/2022 --- # Enable Container insights -This article provides an overview of the requirements and options that are available for setting up Container insights to monitor the performance of workloads that are deployed to Kubernetes environments.
You can enable Container insights for a new deployment or for one or more existing deployments of Kubernetes by using a number of supported methods. +This article provides an overview of the requirements and options that are available for configuring Container insights to monitor the performance of workloads that are deployed to Kubernetes environments. You can enable Container insights for a new deployment or for one or more existing deployments of Kubernetes by using a number of supported methods. ## Supported configurations -Container insights officially supports the following environments: +Container insights supports the following environments: - [Azure Kubernetes Service (AKS)](../../aks/index.yml) - [Azure Arc-enabled Kubernetes cluster](../../azure-arc/kubernetes/overview.md) @@ -26,15 +26,15 @@ The versions of Kubernetes and support policy are the same as those [supported i ## Prerequisites Before you start, make sure that you've met the following requirements: -### Log Analytics workspace +**Log Analytics workspace** Container insights supports a [Log Analytics workspace](../logs/log-analytics-workspace-overview.md) in the regions that are listed in [Products available by region](https://azure.microsoft.com/global-infrastructure/services/?regions=all&products=monitor). For a list of the supported mapping pairs to use for the default workspace, see [Region mappings supported by Container insights](container-insights-region-mapping.md). You can let the onboarding experience create a default workspace in the default resource group of the AKS cluster subscription. If you already have a workspace though, then you will most likely want to use that one. See [Designing your Azure Monitor Logs deployment](../logs/design-logs-deployment.md) for details. -- An AKS cluster can be attached to a Log Analytics workspace in a different Azure subscription in the same Azure AD Tenant. This cannot currently be done with the Azure Portal, but can be done with Azure CLI or Resource Manager template. +An AKS cluster can be attached to a Log Analytics workspace in a different Azure subscription in the same Azure AD Tenant. This cannot currently be done with the Azure Portal, but can be done with Azure CLI or Resource Manager template. -### Permissions +**Permissions** To enable container monitoring, you require the following permissions: - Member of the [Log Analytics contributor](../logs/manage-access.md#manage-access-using-azure-permissions) role. @@ -44,10 +44,10 @@ To enable container monitoring, you require the following permissions: - Member of [Log Analytics reader](../logs/manage-access.md#manage-access-using-azure-permissions) role if you aren't already a member of [Log Analytics contributor](../logs/manage-access.md#manage-access-using-azure-permissions). -### Promethues +**Prometheus** Prometheus metrics aren't collected by default. Before you [configure the agent](container-insights-prometheus-integration.md) to collect the metrics, it's important to review the [Prometheus documentation](https://prometheus.io/) to understand what data can be scraped and what methods are supported. -### Kubelet secure port +**Kubelet secure port** Log Analytics Containerized Linux Agent (replicaset pod) makes API calls to all the Windows nodes on Kubelet Secure Port (10250) within the cluster to collect Node and Container Performance related Metrics.
Kubelet secure port (:10250) should be opened in the cluster's virtual network for both inbound and outbound for Windows Node and container performance related metrics collection to work. If you have a Kubernetes cluster with Windows nodes, then please review and configure the Network Security Group and Network Policies to make sure the Kubelet secure port (:10250) is opened for both inbound and outbound in cluster's virtual network. diff --git a/articles/azure-monitor/essentials/diagnostic-settings.md b/articles/azure-monitor/essentials/diagnostic-settings.md index cecdab36427b..0466520204cb 100644 --- a/articles/azure-monitor/essentials/diagnostic-settings.md +++ b/articles/azure-monitor/essentials/diagnostic-settings.md @@ -65,7 +65,7 @@ Platform logs and metrics can be sent to the destinations in the following table | Destination | Description | |:---|:---| -| [Log Analytics workspace](../logs/design-logs-deployment.md) | Metrics are converted to log form. This option may not be available for all resource types. Sending them to the Azure Monitor Logs store (which is searchable via Log Analytics) helps you to integrate them into queries, alerts, and visualizations with existing log data. +| [Log Analytics workspace](../logs/workspace-design.md) | Metrics are converted to log form. This option may not be available for all resource types. Sending them to the Azure Monitor Logs store (which is searchable via Log Analytics) helps you to integrate them into queries, alerts, and visualizations with existing log data. | [Azure storage account](../../storage/blobs/index.yml) | Archiving logs and metrics to an Azure storage account is useful for audit, static analysis, or backup. Compared to Azure Monitor Logs and a Log Analytics workspace, Azure storage is less expensive and logs can be kept there indefinitely. | | [Event Hubs](../../event-hubs/index.yml) | Sending logs and metrics to Event Hubs allows you to stream data to external systems such as third-party SIEMs and other Log Analytics solutions. | | [Azure Monitor partner integrations](../../partner-solutions/overview.md)| Specialized integrations between Azure Monitor and other non-Microsoft monitoring platforms. Useful when you are already using one of the partners. | diff --git a/articles/azure-monitor/essentials/resource-logs-schema.md b/articles/azure-monitor/essentials/resource-logs-schema.md index 7d6d3da39992..7ede4b29cc89 100644 --- a/articles/azure-monitor/essentials/resource-logs-schema.md +++ b/articles/azure-monitor/essentials/resource-logs-schema.md @@ -90,7 +90,7 @@ The schema for resource logs varies depending on the resource and log category. 
| Azure Storage | [Blobs](../../storage/blobs/monitor-blob-storage-reference.md#resource-logs-preview), [Files](../../storage/files/storage-files-monitoring-reference.md#resource-logs-preview), [Queues](../../storage/queues/monitor-queue-storage-reference.md#resource-logs-preview), [Tables](../../storage/tables/monitor-table-storage-reference.md#resource-logs-preview) | | Azure Stream Analytics |[Job logs](../../stream-analytics/stream-analytics-job-diagnostic-logs.md) | | Azure Traffic Manager | [Traffic Manager log schema](../../traffic-manager/traffic-manager-diagnostic-logs.md) | -| Azure Video Indexer|[Monitor Azure Video Indexer data reference](/azure/azure-video-indexer/monitor-video-indexer-data-reference)| +| Azure Video Indexer|[Monitor Azure Video Indexer data reference](../../azure-video-indexer/monitor-video-indexer-data-reference.md)| | Azure Virtual Network | Schema not available | | Virtual network gateways | [Logging for Virtual Network Gateways](../../vpn-gateway/troubleshoot-vpn-with-azure-diagnostics.md)| @@ -102,4 +102,4 @@ The schema for resource logs varies depending on the resource and log category. * [Learn more about resource logs](../essentials/platform-logs-overview.md) * [Stream resource logs to Event Hubs](./resource-logs.md#send-to-azure-event-hubs) * [Change resource log diagnostic settings by using the Azure Monitor REST API](/rest/api/monitor/diagnosticsettings) -* [Analyze logs from Azure Storage with Log Analytics](./resource-logs.md#send-to-log-analytics-workspace) +* [Analyze logs from Azure Storage with Log Analytics](./resource-logs.md#send-to-log-analytics-workspace) \ No newline at end of file diff --git a/articles/azure-monitor/faq.yml b/articles/azure-monitor/faq.yml index 1ff6c08c9311..7dee95562078 100644 --- a/articles/azure-monitor/faq.yml +++ b/articles/azure-monitor/faq.yml @@ -133,7 +133,7 @@ sections: - question: | What is a Log Analytics workspace? answer: | - All log data collected by Azure Monitor is stored in a Log Analytics workspace. A workspace is essentially a container where log data is collected from various sources. You may have a single Log Analytics workspace for all your monitoring data or may have requirements for multiple workspaces. See [Designing your Azure Monitor Logs deployment](logs/design-logs-deployment.md). + All log data collected by Azure Monitor is stored in a Log Analytics workspace. A workspace is essentially a container where log data is collected from a variety of sources. You may have a single Log Analytics workspace for all your monitoring data or may have requirements for multiple workspaces. See [Design a Log Analytics workspace configuration](logs/workspace-design.md). - question: | Can you move an existing Log Analytics workspace to another Azure subscription? diff --git a/articles/azure-monitor/insights/alert-management-solution.md b/articles/azure-monitor/insights/alert-management-solution.md index 516e27fc0cdd..8932c7fc3ebc 100644 --- a/articles/azure-monitor/insights/alert-management-solution.md +++ b/articles/azure-monitor/insights/alert-management-solution.md @@ -13,14 +13,14 @@ ms.date: 01/02/2022 ![Alert Management icon](media/alert-management-solution/icon.png) > [!CAUTION] -> This solution is no longer in active development and may not work as expected.
We suggest you try using [Azure Resource Graph to query Azure Monitor alerts](../alerts/alerts-overview.md#manage-your-alerts-programmatically). -The Alert Management solution helps you analyze all of the alerts in your Log Analytics repository. These alerts may have come from a variety of sources including those sources [created by Log Analytics](../alerts/alerts-overview.md) or [imported from Nagios or Zabbix](../vm/monitor-virtual-machine.md). The solution also imports alerts from any [connected System Center Operations Manager management groups](../agents/om-agents.md). +The Alert Management solution helps you analyze all of the alerts in your Log Analytics repository. These alerts may have come from a variety of sources including those sources [created by Log Analytics](../alerts/alerts-types.md#log-alerts) or [imported from Nagios or Zabbix](../vm/monitor-virtual-machine.md). The solution also imports alerts from any [connected System Center Operations Manager management groups](../agents/om-agents.md). ## Prerequisites The solution works with any records in the Log Analytics repository with a type of **Alert**, so you must perform whatever configuration is required to collect these records. -- For Log Analytics alerts, [create alert rules](../alerts/alerts-overview.md) to create alert records directly in the repository. +- For Log Analytics alerts, [create alert rules](../alerts/alerts-log.md) to create alert records directly in the repository. - For Nagios and Zabbix alerts, [configure those servers](../vm/monitor-virtual-machine.md) to send alerts to Log Analytics. - For System Center Operations Manager alerts, [connect your Operations Manager management group to your Log Analytics workspace](../agents/om-agents.md). Any alerts created in System Center Operations Manager are imported into Log Analytics. diff --git a/articles/azure-monitor/logs/cost-logs.md b/articles/azure-monitor/logs/cost-logs.md index 50cdfea63818..8ae76080e310 100644 --- a/articles/azure-monitor/logs/cost-logs.md +++ b/articles/azure-monitor/logs/cost-logs.md @@ -10,17 +10,17 @@ ms.date: 03/24/2022 The most significant charges for most Azure Monitor implementations will typically be ingestion and retention of data in your Log Analytics workspaces. Several features in Azure Monitor do not have a direct cost but add to the workspace data that's collected. This article describes how data charges are calculated for your Log Analytics workspaces and Application Insights resources and the different configuration options that affect your costs. ## Pricing model -The default pricing for Log Analytics is a Pay-As-You-Go model that's based on ingested data volume and data retention. Each Log Analytics workspace is charged as a separate service and contributes to the bill for your Azure subscription. The amount of data ingestion can be considerable, depending on the following factors: +The default pricing for Log Analytics is a Pay-As-You-Go model that's based on ingested data volume and data retention. Each Log Analytics workspace is charged as a separate service and contributes to the bill for your Azure subscription. [Pricing for Log Analytics](https://azure.microsoft.com/pricing/details/monitor/) is set regionally. 
The amount of data ingestion can be considerable, depending on the following factors: - The set of management solutions enabled and their configuration - The number and type of monitored resources -- Type of data collected from each monitored resource +- The types of data collected from each monitored resource ## Data size calculation Data volume is measured as the size of the data that will be stored in GB (10^9 bytes). The data size of a single record is calculated from a string representation of the columns that are stored in the Log Analytics workspace for that record, regardless of whether the data is sent from an agent or added during the ingestion process. This includes any custom columns added by the [custom logs API](custom-logs-overview.md), [ingestion-time transformations](ingestion-time-transformations.md), or [custom fields](custom-fields.md) that are added as data is collected and then stored in the workspace. >[!NOTE] ->The billable data volume calculation is substantially smaller than the size of the entire incoming JSON-packaged event, often less than 50%. It is essential to understand this calculation of billed data size when estimating costs and comparing to other pricing models. +>The billable data volume calculation is substantially smaller than the size of the entire incoming JSON-packaged event, often less than 50% for small events. It is essential to understand this calculation of billed data size when estimating costs and comparing to other pricing models. ### Excluded columns The following [standard columns](log-standard-columns.md) that are common to all tables, are excluded in the calculation of the record size. All other columns stored in Log Analytics are included in the calculation of the record size. diff --git a/articles/azure-monitor/logs/cross-workspace-query.md b/articles/azure-monitor/logs/cross-workspace-query.md index 233ec68a2233..2e81b2bbc834 100644 --- a/articles/azure-monitor/logs/cross-workspace-query.md +++ b/articles/azure-monitor/logs/cross-workspace-query.md @@ -17,7 +17,7 @@ If you manage subscriptions in other Azure Active Directory (Azure AD) tenants t There are two methods to query data that is stored in multiple workspace and apps: 1. Explicitly by specifying the workspace and app details. This technique is detailed in this article. -2. Implicitly using [resource-context queries](./design-logs-deployment.md#access-mode). When you query in the context of a specific resource, resource group or a subscription, the relevant data will be fetched from all workspaces that contains data for these resources. Application Insights data that is stored in apps, will not be fetched. +2. Implicitly using [resource-context queries](manage-access.md#access-mode). When you query in the context of a specific resource, resource group, or a subscription, the relevant data will be fetched from all workspaces that contain data for these resources. Application Insights data that is stored in apps will not be fetched. > [!IMPORTANT] > If you are using a [workspace-based Application Insights resource](../app/create-workspace-resource.md), telemetry is stored in a Log Analytics workspace with all other log data. Use the workspace() expression to write a query that includes applications in multiple workspaces. For multiple applications in the same workspace, you don't need a cross workspace query.
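As a concrete illustration of the `workspace()` expression mentioned above, here is a minimal Kusto sketch that unions the same table from the current workspace and a second one. The workspace name `contoso-workspace-2` and the use of the `Heartbeat` table are placeholders for this example; substitute your own workspace identifier and table:

```kusto
// Union the same table from the current workspace and a second workspace.
// "contoso-workspace-2" is a placeholder workspace name.
union Heartbeat, workspace("contoso-workspace-2").Heartbeat
| where TimeGenerated > ago(1h)
| summarize AgentCount = dcount(Computer) by TenantId   // TenantId identifies the source workspace
```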
diff --git a/articles/azure-monitor/logs/customer-managed-keys.md b/articles/azure-monitor/logs/customer-managed-keys.md index e7ee415e5bbc..f80af1a5810c 100644 --- a/articles/azure-monitor/logs/customer-managed-keys.md +++ b/articles/azure-monitor/logs/customer-managed-keys.md @@ -472,4 +472,4 @@ Customer-Managed key is provided on dedicated cluster and these operations are r ## Next steps - Learn about [Log Analytics dedicated cluster billing](cost-logs.md#dedicated-clusters) -- Learn about [proper design of Log Analytics workspaces](./design-logs-deployment.md) +- Learn about [proper design of Log Analytics workspaces](./workspace-design.md) diff --git a/articles/azure-monitor/logs/data-collector-api.md b/articles/azure-monitor/logs/data-collector-api.md index f53c6f703fc9..85fd99c4fbaf 100644 --- a/articles/azure-monitor/logs/data-collector-api.md +++ b/articles/azure-monitor/logs/data-collector-api.md @@ -47,7 +47,7 @@ To use the HTTP Data Collector API, you create a POST request that includes the | Authorization |The authorization signature. Later in the article, you can read about how to create an HMAC-SHA256 header. | | Log-Type |Specify the record type of the data that's being submitted. It can contain only letters, numbers, and the underscore (_) character, and it can't exceed 100 characters. | | x-ms-date |The date that the request was processed, in RFC 7234 format. | -| x-ms-AzureResourceId | The resource ID of the Azure resource that the data should be associated with. It populates the [_ResourceId](./log-standard-columns.md#_resourceid) property and allows the data to be included in [resource-context](./design-logs-deployment.md#access-mode) queries. If this field isn't specified, the data won't be included in resource-context queries. | +| x-ms-AzureResourceId | The resource ID of the Azure resource that the data should be associated with. It populates the [_ResourceId](./log-standard-columns.md#_resourceid) property and allows the data to be included in [resource-context](manage-access.md#access-mode) queries. If this field isn't specified, the data won't be included in resource-context queries. | | time-generated-field | The name of a field in the data that contains the timestamp of the data item. If you specify a field, its contents are used for **TimeGenerated**. If you don't specify this field, the default for **TimeGenerated** is the time that the message is ingested. The contents of the message field should follow the ISO 8601 format YYYY-MM-DDThh:mm:ssZ. Note: the Time Generated value cannot be older than 3 days before received time or the row will be dropped.| | | | diff --git a/articles/azure-monitor/logs/data-platform-logs.md b/articles/azure-monitor/logs/data-platform-logs.md index eee5866f85af..fcb2d3a50257 100644 --- a/articles/azure-monitor/logs/data-platform-logs.md +++ b/articles/azure-monitor/logs/data-platform-logs.md @@ -48,7 +48,7 @@ This configuration will be different depending on the data source. For example: For a complete list of data sources that you can configure to send data to Azure Monitor Logs, see [What is monitored by Azure Monitor?](../monitor-reference.md). ## Log Analytics workspaces -Azure Monitor Logs stores the data that it collects in one or more [Log Analytics workspaces](./design-logs-deployment.md). You must create at least one workspace to use Azure Monitor Logs. See [Log Analytics workspace overview](log-analytics-workspace-overview.md) For a description of Log Analytics workspaces. 
+Azure Monitor Logs stores the data that it collects in one or more [Log Analytics workspaces](./workspace-design.md). You must create at least one workspace to use Azure Monitor Logs. See [Log Analytics workspace overview](log-analytics-workspace-overview.md) For a description of Log Analytics workspaces. ## Log Analytics Log Analytics is a tool in the Azure portal. Use it to edit and run log queries and interactively analyze their results. You can then use those queries to support other features in Azure Monitor, such as log query alerts and workbooks. Access Log Analytics from the **Logs** option on the Azure Monitor menu or from most other services in the Azure portal. diff --git a/articles/azure-monitor/logs/design-logs-deployment.md b/articles/azure-monitor/logs/design-logs-deployment.md deleted file mode 100644 index fdbbedcc6d93..000000000000 --- a/articles/azure-monitor/logs/design-logs-deployment.md +++ /dev/null @@ -1,162 +0,0 @@ ---- -title: Designing your Azure Monitor Logs deployment | Microsoft Docs -description: This article describes the considerations and recommendations for customers preparing to deploy a workspace in Azure Monitor. -ms.topic: conceptual -author: guywi-ms -ms.author: guywild -ms.reviewer: meirm -ms.date: 05/04/2022 - ---- - -# Designing your Azure Monitor Logs deployment - -Azure Monitor stores [log](data-platform-logs.md) data in a Log Analytics workspace, which is an Azure resource and a container where data is collected, aggregated, and serves as an administrative boundary. While you can deploy one or more workspaces in your Azure subscription, there are several considerations you should understand in order to ensure your initial deployment is following our guidelines to provide you with a cost effective, manageable, and scalable deployment meeting your organization's needs. - -Data in a workspace is organized into tables, each of which stores different kinds of data and has its own unique set of properties based on the resource generating the data. Most data sources will write to their own tables in a Log Analytics workspace. - -![Example workspace data model](./media/design-logs-deployment/logs-data-model-01.png) - -A Log Analytics workspace provides: - -* A geographic location for data storage. -* Data isolation by granting different users access rights following one of our recommended design strategies. -* Scope for configuration of settings like [pricing tier](cost-logs.md#commitment-tiers), [retention](data-retention-archive.md), and [data capping](daily-cap.md). - -Workspaces are hosted on physical clusters. By default, the system is creating and managing these clusters. Customers that ingest more than 4TB/day are expected to create their own dedicated clusters for their workspaces - it enables them better control and higher ingestion rate. - -This article provides a detailed overview of the design and migration considerations, access control overview, and an understanding of the design implementations we recommend for your IT organization. - - - -## Important considerations for an access control strategy - -Identifying the number of workspaces you need is influenced by one or more of the following requirements: - -* You are a global company and you need log data stored in specific regions for data sovereignty or compliance reasons. -* You are using Azure and you want to avoid outbound data transfer charges by having a workspace in the same region as the Azure resources it manages. 
-* You manage multiple departments or business groups, and you want each to see their own data, but not data from others. Also, there is no business requirement for a consolidated cross department or business group view. - -IT organizations today are modeled following either a centralized, decentralized, or an in-between hybrid of both structures. As a result, the following workspace deployment models have been commonly used to map to one of these organizational structures: - -* **Centralized**: All logs are stored in a central workspace and administered by a single team, with Azure Monitor providing differentiated access per-team. In this scenario, it is easy to manage, search across resources, and cross-correlate logs. The workspace can grow significantly depending on the amount of data collected from multiple resources in your subscription, with additional administrative overhead to maintain access control to different users. This model is known as "hub and spoke". -* **Decentralized**: Each team has their own workspace created in a resource group they own and manage, and log data is segregated per resource. In this scenario, the workspace can be kept secure and access control is consistent with resource access, but it's difficult to cross-correlate logs. Users who need a broad view of many resources cannot analyze the data in a meaningful way. -* **Hybrid**: Security audit compliance requirements further complicate this scenario because many organizations implement both deployment models in parallel. This commonly results in a complex, expensive, and hard-to-maintain configuration with gaps in logs coverage. - -When using the Log Analytics agents to collect data, you need to understand the following in order to plan your agent deployment: - -* To collect data from Windows agents, you can [configure each agent to report to one or more workspaces](./../agents/agent-windows.md), even while it is reporting to a System Center Operations Manager management group. The Windows agent can report up to four workspaces. -* The Linux agent does not support multi-homing and can only report to a single workspace. - -If you are using System Center Operations Manager 2012 R2 or later: - -* Each Operations Manager management group can be [connected to only one workspace](../agents/om-agents.md). -* Linux computers reporting to a management group must be configured to report directly to a Log Analytics workspace. If your Linux computers are already reporting directly to a workspace and you want to monitor them with Operations Manager, follow these steps to [report to an Operations Manager management group](../agents/agent-manage.md#configure-agent-to-report-to-an-operations-manager-management-group). -* You can install the Log Analytics Windows agent on the Windows computer and have it report to both Operations Manager integrated with a workspace, and a different workspace. - -## Access control overview - -With Azure role-based access control (Azure RBAC), you can grant users and groups only the amount of access they need to work with monitoring data in a workspace. This allows you to align with your IT organization operating model using a single workspace to store collected data enabled on all your resources. For example, you grant access to your team responsible for infrastructure services hosted on Azure virtual machines (VMs), and as a result they'll have access to only the logs generated by the VMs. This is following our new resource-context log model. 
The basis for this model is for every log record emitted by an Azure resource, it is automatically associated with this resource. Logs are forwarded to a central workspace that respects scoping and Azure RBAC based on the resources. - -The data a user has access to is determined by a combination of factors that are listed in the following table. Each is described in the sections below. - -| Factor | Description | -|:---|:---| -| [Access mode](#access-mode) | Method the user uses to access the workspace. Defines the scope of the data available and the access control mode that's applied. | -| [Access control mode](#access-control-mode) | Setting on the workspace that defines whether permissions are applied at the workspace or resource level. | -| [Permissions](./manage-access.md) | Permissions applied to individual or groups of users for the workspace or resource. Defines what data the user will have access to. | -| [Table level Azure RBAC](./manage-access.md#table-level-azure-rbac) | Optional granular permissions that apply to all users regardless of their access mode or access control mode. Defines which data types a user can access. | - -## Access mode - -The *access mode* refers to how a user accesses a Log Analytics workspace and defines the scope of data they can access. - -Users have two options for accessing the data: - -* **Workspace-context**: You can view all logs in the workspace you have permission to. Queries in this mode are scoped to all data in all tables in the workspace. This is the access mode used when logs are accessed with the workspace as the scope, such as when you select **Logs** from the **Azure Monitor** menu in the Azure portal. - - ![Log Analytics context from workspace](./media/design-logs-deployment/query-from-workspace.png) - -* **Resource-context**: When you access the workspace for a particular resource, resource group, or subscription, such as when you select **Logs** from a resource menu in the Azure portal, you can view logs for only resources in all tables that you have access to. Queries in this mode are scoped to only data associated with that resource. This mode also enables granular Azure RBAC. - - ![Log Analytics context from resource](./media/design-logs-deployment/query-from-resource.png) - - > [!NOTE] - > Logs are available for resource-context queries only if they were properly associated with the relevant resource. Currently, the following resources have limitations: - > - Computers outside of Azure - Supported for resource-context only via [Azure Arc for Servers](../../azure-arc/servers/index.yml) - > - Service Fabric - > - Application Insights - Supported for resource-context only when using [Workspace-based Application Insights resource](../app/create-workspace-resource.md) - > - > You can test if logs are properly associated with their resource by running a query and inspecting the records you're interested in. If the correct resource ID is in the [_ResourceId](./log-standard-columns.md#_resourceid) property, then data is available to resource-centric queries. - -Azure Monitor automatically determines the right mode depending on the context you perform the log search from. The scope is always presented in the top-left section of Log Analytics. - -### Comparing access modes - -The following table summarizes the access modes: - -| Issue | Workspace-context | Resource-context | -|:---|:---|:---| -| Who is each model intended for? | Central administration. 
Administrators who need to configure data collection and users who need access to a wide variety of resources. Also currently required for users who need to access logs for resources outside of Azure. | Application teams. Administrators of Azure resources being monitored. | -| What does a user require to view logs? | Permissions to the workspace. See **Workspace permissions** in [Manage access using workspace permissions](./manage-access.md#manage-access-using-workspace-permissions). | Read access to the resource. See **Resource permissions** in [Manage access using Azure permissions](./manage-access.md#manage-access-using-azure-permissions). Permissions can be inherited (such as from the containing resource group) or directly assigned to the resource. Permission to the logs for the resource will be automatically assigned. | -| What is the scope of permissions? | Workspace. Users with access to the workspace can query all logs in the workspace from tables that they have permissions to. See [Table access control](./manage-access.md#table-level-azure-rbac) | Azure resource. User can query logs for specific resources, resource groups, or subscription they have access to from any workspace but can't query logs for other resources. | -| How can user access logs? |
    • Start **Logs** from **Azure Monitor** menu.
    • Start **Logs** from **Log Analytics workspaces**.
    • From Azure Monitor [Workbooks](../best-practices-analysis.md#workbooks).
    |
    • Start **Logs** from the menu for the Azure resource
    • Start **Logs** from **Azure Monitor** menu.
    • Start **Logs** from **Log Analytics workspaces**.
    • From Azure Monitor [Workbooks](../best-practices-analysis.md#workbooks).
    | - -## Access control mode - -The *Access control mode* is a setting on each workspace that defines how permissions are determined for the workspace. - -* **Require workspace permissions**: This control mode does not allow granular Azure RBAC. For a user to access the workspace, they must be granted permissions to the workspace or to specific tables. - - If a user accesses the workspace following the workspace-context mode, they have access to all data in any table they've been granted access to. If a user accesses the workspace following the resource-context mode, they have access to only data for that resource in any table they've been granted access to. - - This is the default setting for all workspaces created before March 2019. - -* **Use resource or workspace permissions**: This control mode allows granular Azure RBAC. Users can be granted access to only data associated with resources they can view by assigning Azure `read` permission. - - When a user accesses the workspace in workspace-context mode, workspace permissions apply. When a user accesses the workspace in resource-context mode, only resource permissions are verified, and workspace permissions are ignored. Enable Azure RBAC for a user by removing them from workspace permissions and allowing their resource permissions to be recognized. - - This is the default setting for all workspaces created after March 2019. - - > [!NOTE] - > If a user has only resource permissions to the workspace, they are only able to access the workspace using resource-context mode assuming the workspace access mode is set to **Use resource or workspace permissions**. - -To learn how to change the access control mode in the portal, with PowerShell, or using a Resource Manager template, see [Configure access control mode](./manage-access.md#configure-access-control-mode). - -## Scale and ingestion volume rate limit - -Azure Monitor is a high scale data service that serves thousands of customers sending petabytes of data each month at a growing pace. Workspaces are not limited in their storage space and can grow to petabytes of data. There is no need to split workspaces due to scale. - -To protect and isolate Azure Monitor customers and its backend infrastructure, there is a default ingestion rate limit that is designed to protect from spikes and floods situations. The rate limit default is about **6 GB/minute** and is designed to enable normal ingestion. For more details on ingestion volume limit measurement, see [Azure Monitor service limits](../service-limits.md#data-ingestion-volume-rate). - -Customers that ingest less than 4TB/day will usually not meet these limits. Customers that ingest higher volumes or that have spikes as part of their normal operations shall consider moving to [dedicated clusters](./logs-dedicated-clusters.md) where the ingestion rate limit could be raised. - -When the ingestion rate limit is activated or get to 80% of the threshold, an event is added to the *Operation* table in your workspace. It is recommended to monitor it and create an alert. See more details in [data ingestion volume rate](../service-limits.md#data-ingestion-volume-rate). - - -## Recommendations - -![Resource-context design example](./media/design-logs-deployment/workspace-design-resource-context-01.png) - -This scenario covers a single workspace design in your IT organization's subscription that is not constrained by data sovereignty or regulatory compliance, or needs to map to the regions your resources are deployed within. 
It allows your organization's security and IT admin teams the ability to leverage the improved integration with Azure access management and more secure access control. - -All resources, monitoring solutions, and Insights such as Application Insights and VM insights, supporting infrastructure and applications maintained by the different teams are configured to forward their collected log data to the IT organization's centralized shared workspace. Users on each team are granted access to logs for resources they have been given access to. - -Once you have deployed your workspace architecture, you can enforce this on Azure resources with [Azure Policy](../../governance/policy/overview.md). It provides a way to define policies and ensure compliance with your Azure resources so they send all their resource logs to a particular workspace. For example, with Azure virtual machines or virtual machine scale sets, you can use existing policies that evaluate workspace compliance and report results, or customize to remediate if non-compliant. - -## Workspace consolidation migration strategy - -For customers who have already deployed multiple workspaces and are interested in consolidating to the resource-context access model, we recommend you take an incremental approach to migrate to the recommended access model, and you don't attempt to achieve this quickly or aggressively. Following a phased approach to plan, migrate, validate, and retire following a reasonable timeline will help avoid any unplanned incidents or unexpected impact to your cloud operations. If you do not have a data retention policy for compliance or business reasons, you need to assess the appropriate length of time to retain data in the workspace you are migrating from during the process. While you are reconfiguring resources to report to the shared workspace, you can still analyze the data in the original workspace as necessary. Once the migration is complete, if you're governed to retain data in the original workspace before the end of the retention period, don't delete it. - -While planning your migration to this model, consider the following: - -* Understand what industry regulations and internal policies regarding data retention you must comply with. -* Make sure that your application teams can work within the existing resource-context functionality. -* Identify the access granted to resources for your application teams and test in a development environment before implementing in production. -* Configure the workspace to enable **Use resource or workspace permissions**. -* Remove application teams permission to read and query the workspace. -* Enable and configure any monitoring solutions, Insights such as Container insights and/or Azure Monitor for VMs, your Automation account(s), and management solutions such as Update Management, Start/Stop VMs, etc., that were deployed in the original workspace. - -## Next steps - -To implement the security permissions and controls recommended in this guide, review [manage access to logs](./manage-access.md). 
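The resource-context guidance above (carried forward in the manage-access article) suggests verifying that records carry a resource association before relying on resource permissions. A minimal Kusto sketch of that check, using only standard columns; the one-hour window is arbitrary:

```kusto
// Share of records in each table that carry a resource association (_ResourceId populated).
union withsource = TableName *
| where TimeGenerated > ago(1h)
| summarize WithResourceId = countif(isnotempty(_ResourceId)), Total = count() by TableName
| extend PercentAssociated = round(100.0 * WithResourceId / Total, 1)
| sort by Total desc
```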
diff --git a/articles/azure-monitor/logs/log-analytics-workspace-overview.md b/articles/azure-monitor/logs/log-analytics-workspace-overview.md index 8e183a3e9dda..fa64e8aa6bba 100644 --- a/articles/azure-monitor/logs/log-analytics-workspace-overview.md +++ b/articles/azure-monitor/logs/log-analytics-workspace-overview.md @@ -14,7 +14,7 @@ A Log Analytics workspace is a unique environment for log data from Azure Monito You can use a single workspace for all your data collection, or you may create multiple workspaces based on a variety of requirements such as the geographic location of the data, access rights that define which users can access data, and configuration settings such as the pricing tier and data retention. -To create a new workspace, see [Create a Log Analytics workspace in the Azure portal](./quick-create-workspace.md). For considerations on creating multiple workspaces, see [Designing your Azure Monitor Logs deployment](design-logs-deployment.md). +To create a new workspace, see [Create a Log Analytics workspace in the Azure portal](./quick-create-workspace.md). For considerations on creating multiple workspaces, see [Design a Log Analytics workspace configuration](workspace-design.md). ## Data structure @@ -70,12 +70,12 @@ To access archived data, you must first retrieve data from it in an Analytics Lo ## Permissions -Permission to data in a Log Analytics workspace is defined by the [access control mode](design-logs-deployment.md#access-control-mode), which is a setting on each workspace. Users can either be given explicit access to the workspace using a [built-in or custom role](../roles-permissions-security.md), or you can allow access to data collected for Azure resources to users with access to those resources. +Permission to data in a Log Analytics workspace is defined by the [access control mode](manage-access.md#access-control-mode), which is a setting on each workspace. Users can either be given explicit access to the workspace using a [built-in or custom role](../roles-permissions-security.md), or you can allow access to data collected for Azure resources to users with access to those resources. See [Manage access to log data and workspaces in Azure Monitor](manage-access.md) for details on the different permission options and on configuring permissions. ## Next steps - [Create a new Log Analytics workspace](quick-create-workspace.md) -- See [Designing your Azure Monitor Logs deployment](design-logs-deployment.md) for considerations on creating multiple workspaces. +- See [Design a Log Analytics workspace configuration](workspace-design.md) for considerations on creating multiple workspaces. - [Learn about log queries to retrieve and analyze data from a Log Analytics workspace.](./log-query-overview.md) diff --git a/articles/azure-monitor/logs/logs-dedicated-clusters.md b/articles/azure-monitor/logs/logs-dedicated-clusters.md index ac5c08eba8fc..8c156f7cd48a 100644 --- a/articles/azure-monitor/logs/logs-dedicated-clusters.md +++ b/articles/azure-monitor/logs/logs-dedicated-clusters.md @@ -39,16 +39,16 @@ Log Analytics Dedicated Clusters use a commitment tier pricing model of at least Provide the following properties when creating new dedicated cluster: -- **ClusterName**--must be unique per resource group -- **ResourceGroupName**--use central IT resource group since clusters are usually shared by many teams in the organization. For more design considerations, review [Designing your Azure Monitor Logs deployment](../logs/design-logs-deployment.md).
+- **ClusterName**: Must be unique for the resource group. +- **ResourceGroupName**: You should use a central IT resource group because clusters are usually shared by many teams in the organization. For more design considerations, review [Design a Log Analytics workspace configuration](../logs/workspace-design.md). - **Location** -- **SkuCapacity**--the Commitment Tier (formerly called capacity reservations) can be set to 500, 1000, 2000 or 5000 GB/day. For more information on cluster costs, see [Dedicate clusters](./cost-logs.md#dedicated-clusters). +- **SkuCapacity**: The Commitment Tier (formerly called capacity reservations) can be set to 500, 1000, 2000 or 5000 GB/day. For more information on cluster costs, see [Dedicate clusters](./cost-logs.md#dedicated-clusters). The user account that creates the clusters must have the standard Azure resource creation permission: `Microsoft.Resources/deployments/*` and cluster write permission `Microsoft.OperationalInsights/clusters/write` by having in their role assignments this specific action or `Microsoft.OperationalInsights/*` or `*/write`. After you create your cluster resource, you can edit additional properties such as *sku*, *keyVaultProperties, or *billingType*. See more details below. -You can have up to five active clusters per subscription per region. If the cluster is deleted, it is still reserved for 14 days. You can have up to four reserved clusters per subscription per region (active or recently deleted). +You can have up to five active clusters per subscription per region. If the cluster is deleted, it is still reserved for 14 days. You can have up to seven reserved clusters per subscription per region (active or recently deleted). > [!NOTE] > Cluster creation triggers resource allocation and provisioning. This operation can take a few hours to complete. @@ -581,7 +581,7 @@ Authorization: Bearer - A maximum of five active clusters can be created in each region and subscription. -- A maximum number of four reserved clusters (active or recently deleted) can be created in each region and subscription. +- A maximum number of seven reserved clusters (active or recently deleted) can exist in each region and subscription. - A maximum of 1,000 Log Analytics workspaces can be linked to a cluster. @@ -654,4 +654,4 @@ Authorization: Bearer ## Next steps - Learn about [Log Analytics dedicated cluster billing](cost-logs.md#dedicated-clusters) -- Learn about [proper design of Log Analytics workspaces](../logs/design-logs-deployment.md) +- Learn about [proper design of Log Analytics workspaces](../logs/workspace-design.md) diff --git a/articles/azure-monitor/logs/manage-access.md b/articles/azure-monitor/logs/manage-access.md index 88c9f4069f34..039b4f2233ed 100644 --- a/articles/azure-monitor/logs/manage-access.md +++ b/articles/azure-monitor/logs/manage-access.md @@ -1,5 +1,5 @@ --- -title: Manage Log Analytics workspaces in Azure Monitor | Microsoft Docs +title: Manage access to Log Analytics workspaces description: You can manage access to data stored in a Log Analytics workspace in Azure Monitor using resource, workspace, or table-level permissions. This article details how to complete.
ms.topic: conceptual ms.reviewer: MeirMen @@ -8,44 +8,85 @@ ms.custom: devx-track-azurepowershell --- -# Manage access to log data and workspaces in Azure Monitor +# Manage access to Log Analytics workspaces + The data in a Log Analytics workspace that a user can access is determined by a combination of factors including settings on the workspace itself, the user's access to resources sending data to the workspace, and the method that the user accesses the workspace. This article describes how access is managed and how to perform any required configuration. -Azure Monitor stores [log](../logs/data-platform-logs.md) data in a Log Analytics workspace. A workspace is a container that includes data and configuration information. To manage access to log data, you perform various administrative tasks related to your workspace. +## Overview +The factors that define the data a user can access are briefly described in the following table. Each is further described in the sections below. -This article explains how to manage access to logs and to administer the workspaces that contain them, including how to grant access to: +| Factor | Description | +|:---|:---| +| [Access mode](#access-mode) | Method the user uses to access the workspace. Defines the scope of the data available and the access control mode that's applied. | +| [Access control mode](#access-control-mode) | Setting on the workspace that defines whether permissions are applied at the workspace or resource level. | +| [Azure RBAC](#azure-rbac) | Permissions applied to individual or groups of users for the workspace or resource sending data to the workspace. Defines what data the user will have access to. | +| [Table level Azure RBAC](#table-level-azure-rbac) | Optional permissions that defines specific data types in the workspace that a user can access. Apply to all users regardless of their access mode or access control mode. | -* The workspace using workspace permissions. -* Users who need access to log data from specific resources using Azure role-based access control (Azure RBAC) - also known as [resource-context](../logs/design-logs-deployment.md#access-mode) -* Users who need access to log data in a specific table in the workspace using Azure RBAC. -To understand the Logs concepts around Azure RBAC and access strategies, read [designing your Azure Monitor Logs deployment](../logs/design-logs-deployment.md) +## Access mode +The *access mode* refers to how a user accesses a Log Analytics workspace and defines the data they can access during the current session. The mode is determined according to the [scope](scope.md) you select in Log Analytics. -## Configure access control mode +There are two access modes: -You can view the [access control mode](../logs/design-logs-deployment.md) configured on a workspace from the Azure portal or with Azure PowerShell. You can change this setting using one of the following supported methods: +- **Workspace-context**: You can view all logs in the workspace that you have permission to. Queries in this mode are scoped to all data in all tables in the workspace. This is the access mode used when logs are accessed with the workspace as the scope, such as when you select **Logs** from the **Azure Monitor** menu in the Azure portal. 
-* Azure portal + - **Resource-context**: When you access the workspace for a particular resource, resource group, or subscription, such as when you select **Logs** from a resource menu in the Azure portal, you can view logs for only resources in all tables that you have access to. Queries in this mode are scoped to only data associated with that resource. This mode also enables granular Azure RBAC. Workspaces use a resource-context log model where every log record emitted by an Azure resource, is automatically associated with this resource. -* Azure PowerShell + +Records are only available in resource-context queries if they are associated with the relevant resource. You can check this association by running a query and verifying that the [_ResourceId](./log-standard-columns.md#_resourceid) column is populated. -* Azure Resource Manager template +There are known limitations with the following resources: -### From the Azure portal +- Computers outside of Azure. Resource-context is only supported with [Azure Arc for Servers](../../azure-arc/servers/index.yml). +- Application Insights. Supported for resource-context only when using [Workspace-based Application Insights resource](../app/create-workspace-resource.md) +- Service Fabric -You can view the current workspace access control mode on the **Overview** page for the workspace in the **Log Analytics workspace** menu. -![View workspace access control mode](media/manage-access/view-access-control-mode.png) +### Comparing access modes + +The following table summarizes the access modes: + +| Issue | Workspace-context | Resource-context | +|:---|:---|:---| +| Who is each model intended for? | Central administration.
    Administrators who need to configure data collection and users who need access to a wide variety of resources. Also currently required for users who need to access logs for resources outside of Azure. | Application teams.
    Administrators of Azure resources being monitored. Allows them to focus on their resource without filtering. | +| What does a user require to view logs? | Permissions to the workspace.
    See **Workspace permissions** in [Manage access using workspace permissions](./manage-access.md#azure-rbac). | Read access to the resource.
    See **Resource permissions** in [Manage access using Azure permissions](./manage-access.md#azure-rbac). Permissions can be inherited from the resource group or subscription or directly assigned to the resource. Permission to the logs for the resource will be automatically assigned. The user doesn't require access to the workspace.| +| What is the scope of permissions? | Workspace.
    Users with access to the workspace can query all logs in the workspace from tables that they have permissions to. See [Table access control](./manage-access.md#table-level-azure-rbac) | Azure resource.
    User can query logs for specific resources, resource groups, or subscription they have access to in any workspace but can't query logs for other resources. | +| How can user access logs? | Start **Logs** from **Azure Monitor** menu.

    Start **Logs** from **Log Analytics workspaces**.

    From Azure Monitor [Workbooks](../best-practices-analysis.md#workbooks). | Start **Logs** from the menu for the Azure resource. User will have access to data for that resource.

    Start **Logs** from **Azure Monitor** menu. User will have access to data for all resources they have access to.

    Start **Logs** from **Log Analytics workspaces**. User will have access to data for all resources they have access to.

    From Azure Monitor [Workbooks](../best-practices-analysis.md#workbooks). | + +## Access control mode + +The *Access control mode* is a setting on each workspace that defines how permissions are determined for the workspace. + +* **Require workspace permissions**. This control mode does not allow granular Azure RBAC. For a user to access the workspace, they must be [granted permissions to the workspace](#azure-rbac) or to [specific tables](#table-level-azure-rbac). + + If a user accesses the workspace in [workspace-context mode](#access-mode), they have access to all data in any table they've been granted access to. If a user accesses the workspace in [resource-context mode](#access-mode), they have access to only data for that resource in any table they've been granted access to. + + This is the default setting for all workspaces created before March 2019. + +* **Use resource or workspace permissions**. This control mode allows granular Azure RBAC. Users can be granted access to only data associated with resources they can view by assigning Azure `read` permission. + + When a user accesses the workspace in [workspace-context mode](#access-mode), workspace permissions apply. When a user accesses the workspace in [resource-context mode](#access-mode), only resource permissions are verified, and workspace permissions are ignored. Enable Azure RBAC for a user by removing them from workspace permissions and allowing their resource permissions to be recognized. + + This is the default setting for all workspaces created after March 2019. + + > [!NOTE] + > If a user has only resource permissions to the workspace, they are only able to access the workspace using resource-context mode assuming the workspace access mode is set to **Use resource or workspace permissions**. + +### Configure access control mode for a workspace + -1. Sign in to the Azure portal at [https://portal.azure.com](https://portal.azure.com). -1. In the Azure portal, select Log Analytics workspaces > your workspace. +# [Azure portal](#tab/portal) + +View the current workspace access control mode on the **Overview** page for the workspace in the **Log Analytics workspace** menu. + +![View workspace access control mode](media/manage-access/view-access-control-mode.png) -You can change this setting from the **Properties** page of the workspace. Changing the setting will be disabled if you don't have permissions to configure the workspace. +Change this setting from the **Properties** page of the workspace. Changing the setting will be disabled if you don't have permissions to configure the workspace. ![Change workspace access mode](media/manage-access/change-access-control-mode.png) -### Using PowerShell +# [PowerShell](#tab/powershell) -Use the following command to examine the access control mode for all workspaces in the subscription: +Use the following command to view the access control mode for all workspaces in the subscription: ```powershell Get-AzResource -ResourceType Microsoft.OperationalInsights/workspaces -ExpandProperties | foreach {$_.Name + ": " + $_.Properties.features.enableLogAccessUsingOnlyResourcePermissions} @@ -58,13 +99,13 @@ DefaultWorkspace38917: True DefaultWorkspace21532: False ``` -A value of `False` means the workspace is configured with the workspace-context access mode. A value of `True` means the workspace is configured with the resource-context access mode. +A value of `False` means the workspace is configured with *workspace-context* access mode. 
A value of `True` means the workspace is configured with *resource-context* access mode. > [!NOTE] > If a workspace is returned without a boolean value and is blank, this also matches the results of a `False` value. > -Use the following script to set the access control mode for a specific workspace to the resource-context permission: +Use the following script to set the access control mode for a specific workspace to *resource-context* permission: ```powershell $WSName = "my-workspace" @@ -76,7 +117,7 @@ else Set-AzResource -ResourceId $Workspace.ResourceId -Properties $Workspace.Properties -Force ``` -Use the following script to set the access control mode for all workspaces in the subscription to the resource-context permission: +Use the following script to set the access control mode for all workspaces in the subscription to *resource-context* permission: ```powershell Get-AzResource -ResourceType Microsoft.OperationalInsights/workspaces -ExpandProperties | foreach { @@ -88,78 +129,84 @@ Set-AzResource -ResourceId $_.ResourceId -Properties $_.Properties -Force } ``` -### Using a Resource Manager template +# [Resource Manager](#tab/arm) To configure the access mode in an Azure Resource Manager template, set the **enableLogAccessUsingOnlyResourcePermissions** feature flag on the workspace to one of the following values. -* **false**: Set the workspace to workspace-context permissions. This is the default setting if the flag isn't set. -* **true**: Set the workspace to resource-context permissions. +* **false**: Set the workspace to *workspace-context* permissions. This is the default setting if the flag isn't set. +* **true**: Set the workspace to *resource-context* permissions. -## Manage access using workspace permissions - -Each workspace can have multiple accounts associated with it, and each account can have access to multiple workspaces. Access is managed using [Azure role-based access control (Azure RBAC)](../../role-based-access-control/role-assignments-portal.md). +--- -The following activities also require Azure permissions: +## Azure RBAC +Access to a workspace is managed using [Azure role-based access control (Azure RBAC)](../../role-based-access-control/role-assignments-portal.md). To grant access to the Log Analytics workspace using Azure permissions, follow the steps in [assign Azure roles to manage access to your Azure subscription resources](../../role-based-access-control/role-assignments-portal.md). +### Workspace permissions +Each workspace can have multiple accounts associated with it, and each account can have access to multiple workspaces. The following table lists the Azure permissions for different workspace actions: |Action |Azure Permissions Needed |Notes | |-------|-------------------------|------| -| Adding and removing monitoring solutions | `Microsoft.Resources/deployments/*`
    `Microsoft.OperationalInsights/*`
    `Microsoft.OperationsManagement/*`
    `Microsoft.Automation/*`
    `Microsoft.Resources/deployments/*/write` | These permissions need to be granted at resource group or subscription level. | -| Changing the pricing tier | `Microsoft.OperationalInsights/workspaces/*/write` | | -| Viewing data in the *Backup* and *Site Recovery* solution tiles | Administrator / Co-administrator | Accesses resources deployed using the classic deployment model | -| Creating a workspace in the Azure portal | `Microsoft.Resources/deployments/*`
    `Microsoft.OperationalInsights/workspaces/*` || -| View workspace basic properties and enter the workspace blade in the portal | `Microsoft.OperationalInsights/workspaces/read` || -| Query logs using any interface | `Microsoft.OperationalInsights/workspaces/query/read` || -| Access all log types using queries | `Microsoft.OperationalInsights/workspaces/query/*/read` || -| Access a specific log table | `Microsoft.OperationalInsights/workspaces/query//read` || -| Read the workspace keys to allow sending logs to this workspace | `Microsoft.OperationalInsights/workspaces/sharedKeys/action` || +| Change the pricing tier | `Microsoft.OperationalInsights/workspaces/*/write` | +| Creating a workspace in the Azure portal | `Microsoft.Resources/deployments/*`
    `Microsoft.OperationalInsights/workspaces/*` | +| View workspace basic properties and enter the workspace blade in the portal | `Microsoft.OperationalInsights/workspaces/read` | +| Query logs using any interface | `Microsoft.OperationalInsights/workspaces/query/read` | +| Access all log types using queries | `Microsoft.OperationalInsights/workspaces/query/*/read` | +| Access a specific log table | `Microsoft.OperationalInsights/workspaces/query//read` | +| Read the workspace keys to allow sending logs to this workspace | `Microsoft.OperationalInsights/workspaces/sharedKeys/action` | +| Add and remove monitoring solutions | `Microsoft.Resources/deployments/*`
    `Microsoft.OperationalInsights/*`
    `Microsoft.OperationsManagement/*`
    `Microsoft.Automation/*`
    `Microsoft.Resources/deployments/*/write`

    These permissions need to be granted at resource group or subscription level. | +| View data in the *Backup* and *Site Recovery* solution tiles | Administrator / Co-administrator

    Accesses resources deployed using the classic deployment model | + +### Built-in roles +Assign users to these roles to give them access at different scopes: -## Manage access using Azure permissions +* Subscription - Access to all workspaces in the subscription +* Resource Group - Access to all workspaces in the resource group +* Resource - Access to only the specified workspace -To grant access to the Log Analytics workspace using Azure permissions, follow the steps in [assign Azure roles to manage access to your Azure subscription resources](../../role-based-access-control/role-assignments-portal.md). For example custom roles, see [Example custom roles](#custom-role-examples) +Create assignments at the resource level (workspace) to ensure accurate access control. Use [custom roles](../../role-based-access-control/custom-roles.md) to create roles with the specific permissions needed. -Azure has two built-in user roles for Log Analytics workspaces: > [!NOTE] > To add users to or remove users from a role, you must have `Microsoft.Authorization/*/Delete` and `Microsoft.Authorization/*/Write` permissions. -* Log Analytics Reader -* Log Analytics Contributor + +#### Log Analytics Reader +Members of the *Log Analytics Reader* role can view all monitoring data and monitoring settings, including the configuration of Azure diagnostics on all Azure resources. Members of the *Log Analytics Reader* role can: -* View and search all monitoring data -* View monitoring settings, including viewing the configuration of Azure diagnostics on all Azure resources. +- View and search all monitoring data +- View monitoring settings, including viewing the configuration of Azure diagnostics on all Azure resources. -The Log Analytics Reader role includes the following Azure actions: +*Log Analytics Reader* includes the following Azure actions: | Type | Permission | Description | | ------- | ---------- | ----------- | -| Action | `*/read` | Ability to view all Azure resources and resource configuration. Includes viewing:
    Virtual machine extension status
    Configuration of Azure diagnostics on resources
    All properties and settings of all resources.
    For workspaces, it allows full unrestricted permissions to read the workspace settings and perform query on the data. See more granular options above. | -| Action | `Microsoft.OperationalInsights/workspaces/analytics/query/action` | Deprecated, no need to assign them to users. | -| Action | `Microsoft.OperationalInsights/workspaces/search/action` | Deprecated, no need to assign them to users. | +| Action | `*/read` | Ability to view all Azure resources and resource configuration.
    Includes viewing:
    - Virtual machine extension status
    - Configuration of Azure diagnostics on resources
    - All properties and settings of all resources.

    For workspaces, allows full unrestricted permissions to read the workspace settings and query data. See more granular options above. | | Action | `Microsoft.Support/*` | Ability to open support cases | |Not Action | `Microsoft.OperationalInsights/workspaces/sharedKeys/read` | Prevents reading of workspace key required to use the data collection API and to install agents. This prevents the user from adding new resources to the workspace | +| Action | `Microsoft.OperationalInsights/workspaces/analytics/query/action` | Deprecated. | +| Action | `Microsoft.OperationalInsights/workspaces/search/action` | Deprecated. | +#### Log Analytics Contributor Members of the *Log Analytics Contributor* role can: -* Includes all the privileges of the *Log Analytics Reader role*, allowing the user to read all monitoring data -* Create and configure Automation accounts -* Add and remove management solutions - - > [!NOTE] - > In order to successfully perform the last two actions, this permission needs to be granted at the resource group or subscription level. +- Read all monitoring data granted by the *Log Analytics Reader role*. +- Edit monitoring settings for Azure resources, including + - Adding the VM extension to VMs + - Configuring Azure diagnostics on all Azure resources +- Create and configure Automation accounts. Permission needs to be granted at the resource group or subscription level. +- Add and remove management solutions. Permission needs to be granted at the resource group or subscription level. +- Read storage account keys +- Configure the collection of logs from Azure Storage -* Read storage account keys -* Configure the collection of logs from Azure Storage -* Edit monitoring settings for Azure resources, including - * Adding the VM extension to VMs - * Configuring Azure diagnostics on all Azure resources -> [!NOTE] -> You can use the ability to add a virtual machine extension to a virtual machine to gain full control over a virtual machine. +> [!WARNING] +> You can use the permission to add a virtual machine extension to a virtual machine to gain full control over a virtual machine. The Log Analytics Contributor role includes the following Azure actions: | Permission | Description | | ---------- | ----------- | -| `*/read` | Ability to view all resources and resource configuration. Includes viewing:
    Virtual machine extension status
    Configuration of Azure diagnostics on resources
    All properties and settings of all resources.
    For workspaces, it allows full unrestricted permissions to read the workspace setting and perform query on the data. See more granular options above. | +| `*/read` | Ability to view all Azure resources and resource configuration.

    Includes viewing:
    - Virtual machine extension status
    - Configuration of Azure diagnostics on resources
    - All properties and settings of all resources.

    For workspaces, allows full unrestricted permissions to read the workspace settings and query data. See more granular options above. | | `Microsoft.Automation/automationAccounts/*` | Ability to create and configure Azure Automation accounts, including adding and editing runbooks | | `Microsoft.ClassicCompute/virtualMachines/extensions/*`
    `Microsoft.Compute/virtualMachines/extensions/*` | Add, update and remove virtual machine extensions, including the Microsoft Monitoring Agent extension and the OMS Agent for Linux extension | | `Microsoft.ClassicStorage/storageAccounts/listKeys/action`
    `Microsoft.Storage/storageAccounts/listKeys/action` | View the storage account key. Required to configure Log Analytics to read logs from Azure storage accounts | @@ -170,19 +217,11 @@ The Log Analytics Contributor role includes the following Azure actions: | `Microsoft.Resources/deployments/*` | Create and delete deployments. Required for adding and removing solutions, workspaces, and automation accounts | | `Microsoft.Resources/subscriptions/resourcegroups/deployments/*` | Create and delete deployments. Required for adding and removing solutions, workspaces, and automation accounts | -To add and remove users to a user role, it is necessary to have `Microsoft.Authorization/*/Delete` and `Microsoft.Authorization/*/Write` permission. - -Use these roles to give users access at different scopes: - -* Subscription - Access to all workspaces in the subscription -* Resource Group - Access to all workspace in the resource group -* Resource - Access to only the specified workspace -We recommend performing assignments at the resource level (workspace) to assure accurate access control. Use [custom roles](../../role-based-access-control/custom-roles.md) to create roles with the specific permissions needed. ### Resource permissions -When users query logs from a workspace using resource-context access, they'll have the following permissions on the resource: +When users query logs from a workspace using [resource-context access](#access-mode), they'll have the following permissions on the resource: | Permission | Description | | ---------- | ----------- | @@ -191,61 +230,56 @@ When users query logs from a workspace using resource-context access, they'll ha `/read` permission is usually granted from a role that includes _\*/read or_ _\*_ permissions such as the built-in [Reader](../../role-based-access-control/built-in-roles.md#reader) and [Contributor](../../role-based-access-control/built-in-roles.md#contributor) roles. Custom roles that include specific actions or dedicated built-in roles might not include this permission. -See [Defining per-table access control](#table-level-azure-rbac) below if you want to create different access control for different tables. - -## Custom role examples - -1. To grant a user access to log data from their resources, perform the following: - - * Configure the workspace access control mode to **use workspace or resource permissions** - * Grant users `*/read` or `Microsoft.Insights/logs/*/read` permissions to their resources. If they are already assigned the [Log Analytics Reader](../../role-based-access-control/built-in-roles.md#reader) role on the workspace, it is sufficient. +### Custom role examples +In addition to using the built-in roles for Log Analytics workspace, you can create custom roles to assign more granular permissions. Following are some common examples. -2. To grant a user access to log data from their resources and configure their resources to send logs to the workspace, perform the following: +**Grant a user access to log data from their resources.** - * Configure the workspace access control mode to **use workspace or resource permissions** +- Configure the workspace access control mode to **use workspace or resource permissions** +- Grant users `*/read` or `Microsoft.Insights/logs/*/read` permissions to their resources. If they are already assigned the [Log Analytics Reader](../../role-based-access-control/built-in-roles.md#reader) role on the workspace, it is sufficient. 
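The following PowerShell sketch is one hedged illustration of the first example above: it creates a custom role that grants only `Microsoft.Insights/logs/*/read` at a resource group scope and assigns it to a user. The role name, the resource group *contoso-monitored-rg*, the user *user@contoso.com*, and the subscription ID are hypothetical placeholders; assigning the built-in Log Analytics Reader role on the workspace is an equally valid alternative, as noted above.

```powershell
# Requires the Az.Resources module and an authenticated session (Connect-AzAccount).
# All names and IDs below are hypothetical placeholders.

# Start from the built-in Reader role as a template for a new custom role.
$role = Get-AzRoleDefinition -Name "Reader"
$role.Id = $null
$role.IsCustom = $true
$role.Name = "Log Data Reader (resource context)"
$role.Description = "Read monitoring log data for resources in the assigned scope."

# Grant only the log read action and scope the role to a single resource group.
$role.Actions.Clear()
$role.Actions.Add("Microsoft.Insights/logs/*/read")
$role.NotActions.Clear()
$role.AssignableScopes.Clear()
$role.AssignableScopes.Add("/subscriptions/<subscription-id>/resourceGroups/contoso-monitored-rg")
New-AzRoleDefinition -Role $role

# Assign the custom role to a user at the resource group scope.
New-AzRoleAssignment -SignInName "user@contoso.com" `
    -RoleDefinitionName "Log Data Reader (resource context)" `
    -ResourceGroupName "contoso-monitored-rg"
```

If the workspace access control mode is set to **use workspace or resource permissions**, this is enough for the user to run resource-context queries against their resources without any explicit workspace permissions.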
- * Grant users the following permissions on the workspace: `Microsoft.OperationalInsights/workspaces/read` and `Microsoft.OperationalInsights/workspaces/sharedKeys/action`. With these permissions, users cannot perform any workspace-level queries. They can only enumerate the workspace and use it as a destination for diagnostic settings or agent configuration. +**Grant a user access to log data from their resources and configure their resources to send logs to the workspace.** - * Grant users the following permissions to their resources: `Microsoft.Insights/logs/*/read` and `Microsoft.Insights/diagnosticSettings/write`. If they are already assigned the [Log Analytics Contributor](../../role-based-access-control/built-in-roles.md#contributor) role, assigned the Reader role, or granted `*/read` permissions on this resource, it is sufficient. +- Configure the workspace access control mode to **use workspace or resource permissions** +- Grant users the following permissions on the workspace: `Microsoft.OperationalInsights/workspaces/read` and `Microsoft.OperationalInsights/workspaces/sharedKeys/action`. With these permissions, users cannot perform any workspace-level queries. They can only enumerate the workspace and use it as a destination for diagnostic settings or agent configuration. +- Grant users the following permissions to their resources: `Microsoft.Insights/logs/*/read` and `Microsoft.Insights/diagnosticSettings/write`. If they are already assigned the [Log Analytics Contributor](../../role-based-access-control/built-in-roles.md#contributor) role, assigned the Reader role, or granted `*/read` permissions on this resource, it is sufficient. -3. To grant a user access to log data from their resources without being able to read security events and send data, perform the following: +**Grant a user access to log data from their resources without being able to read security events and send data.** - * Configure the workspace access control mode to **use workspace or resource permissions** +- Configure the workspace access control mode to **use workspace or resource permissions** +- Grant users the following permissions to their resources: `Microsoft.Insights/logs/*/read`. +- Add the following NonAction to block users from reading the SecurityEvent type: `Microsoft.Insights/logs/SecurityEvent/read`. The NonAction shall be in the same custom role as the action that provides the read permission (`Microsoft.Insights/logs/*/read`). If the user inherent the read action from another role that is assigned to this resource or to the subscription or resource group, they would be able to read all log types. This is also true if they inherit `*/read`, that exist for example, with the Reader or Contributor role. - * Grant users the following permissions to their resources: `Microsoft.Insights/logs/*/read`. +**Grant a user access to log data from their resources and read all Azure AD sign-in and read Update Management solution log data from the workspace.** - * Add the following NonAction to block users from reading the SecurityEvent type: `Microsoft.Insights/logs/SecurityEvent/read`. The NonAction shall be in the same custom role as the action that provides the read permission (`Microsoft.Insights/logs/*/read`). If the user inherent the read action from another role that is assigned to this resource or to the subscription or resource group, they would be able to read all log types. This is also true if they inherit `*/read`, that exist for example, with the Reader or Contributor role. - -4. 
To grant a user access to log data from their resources and read all Azure AD sign-in and read Update Management solution log data from the workspace, perform the following: - - * Configure the workspace access control mode to **use workspace or resource permissions** - - * Grant users the following permissions on the workspace: - - * `Microsoft.OperationalInsights/workspaces/read` – required so the user can enumerate the workspace and open the workspace blade in the Azure portal - * `Microsoft.OperationalInsights/workspaces/query/read` – required for every user that can execute queries - * `Microsoft.OperationalInsights/workspaces/query/SigninLogs/read` – to be able to read Azure AD sign-in logs - * `Microsoft.OperationalInsights/workspaces/query/Update/read` – to be able to read Update Management solution logs - * `Microsoft.OperationalInsights/workspaces/query/UpdateRunProgress/read` – to be able to read Update Management solution logs - * `Microsoft.OperationalInsights/workspaces/query/UpdateSummary/read` – to be able to read Update management logs - * `Microsoft.OperationalInsights/workspaces/query/Heartbeat/read` – required to be able to use Update Management solution - * `Microsoft.OperationalInsights/workspaces/query/ComputerGroup/read` – required to be able to use Update Management solution - - * Grant users the following permissions to their resources: `*/read`, assigned to the Reader role, or `Microsoft.Insights/logs/*/read`. +- Configure the workspace access control mode to **use workspace or resource permissions** +- Grant users the following permissions on the workspace: + - `Microsoft.OperationalInsights/workspaces/read` – required so the user can enumerate the workspace and open the workspace blade in the Azure portal + - `Microsoft.OperationalInsights/workspaces/query/read` – required for every user that can execute queries + - `Microsoft.OperationalInsights/workspaces/query/SigninLogs/read` – to be able to read Azure AD sign-in logs + - `Microsoft.OperationalInsights/workspaces/query/Update/read` – to be able to read Update Management solution logs + - `Microsoft.OperationalInsights/workspaces/query/UpdateRunProgress/read` – to be able to read Update Management solution logs + - `Microsoft.OperationalInsights/workspaces/query/UpdateSummary/read` – to be able to read Update management logs + - `Microsoft.OperationalInsights/workspaces/query/Heartbeat/read` – required to be able to use Update Management solution + - `Microsoft.OperationalInsights/workspaces/query/ComputerGroup/read` – required to be able to use Update Management solution +- Grant users the following permissions to their resources: `*/read`, assigned to the Reader role, or `Microsoft.Insights/logs/*/read`. ## Table level Azure RBAC +Table level Azure RBAC allows you to define more granular control to data in a Log Analytics workspace by defining specific data types that are accessible only to a specific set of users. + +Implement table access control with [Azure custom roles](../../role-based-access-control/custom-roles.md) to either grant access to specific [tables](../logs/data-platform-logs.md) in the workspace. These roles are applied to workspaces with either workspace-context or resource-context [access control modes](#access-control-mode) regardless of the user's [access mode](#access-mode). -**Table level Azure RBAC** allows you to define more granular control to data in a Log Analytics workspace in addition to the other permissions. 
This control allows you to define specific data types that are accessible only to a specific set of users. +Create a [custom role](../../role-based-access-control/custom-roles.md) with the following actions to define access to a particular table. -You implement table access control with [Azure custom roles](../../role-based-access-control/custom-roles.md) to either grant access to specific [tables](../logs/data-platform-logs.md) in the workspace. These roles are applied to workspaces with either workspace-context or resource-context [access control modes](../logs/design-logs-deployment.md#access-control-mode) regardless of the user's [access mode](../logs/design-logs-deployment.md#access-mode). +* Include the **Actions** section of the role definition. To subtract access from the allowed **Actions**, include it in the **NotActions** section. +* Use `Microsoft.OperationalInsights/workspaces/query/*` to specify all tables. -Create a [custom role](../../role-based-access-control/custom-roles.md) with the following actions to define access to table access control. -* To grant access to a table, include it in the **Actions** section of the role definition. To subtract access from the allowed **Actions**, include it in the **NotActions** section. -* Use Microsoft.OperationalInsights/workspaces/query/* to specify all tables. +### Examples +Following are examples of custom role actions to grant and deny access to specific tables. -For example, to create a role with access to the _Heartbeat_ and _AzureActivity_ tables, create a custom role using the following actions: +**Grant access to the _Heartbeat_ and _AzureActivity_ tables.** ``` "Actions": [ @@ -256,7 +290,7 @@ For example, to create a role with access to the _Heartbeat_ and _AzureActivity_ ], ``` -To create a role with access to only the _SecurityBaseline_ table, create a custom role using the following actions: +**Grant access to only the _SecurityBaseline_ table.** ``` "Actions": [ @@ -265,7 +299,9 @@ To create a role with access to only the _SecurityBaseline_ table, create a cust "Microsoft.OperationalInsights/workspaces/query/SecurityBaseline/read" ], ``` -The examples above define a list of tables that are allowed. This example shows blocked list definition when a user can access all tables but the _SecurityAlert_ table: + + +**Grant access to all tables except the _SecurityAlert_ table.** ``` "Actions": [ @@ -280,9 +316,12 @@ The examples above define a list of tables that are allowed. This example shows ### Custom logs - Custom logs are created from data sources such as custom logs and HTTP Data Collector API. The easiest way to identify the type of log is by checking the tables listed under [Custom Logs in the log schema](./log-analytics-tutorial.md#view-table-information). + Custom logs are tables created from data sources such as [text logs](../agents/data-sources-custom-logs.md) and [HTTP Data Collector API](data-collector-api.md). The easiest way to identify the type of log is by checking the tables listed under [Custom Logs in the log schema](./log-analytics-tutorial.md#view-table-information). - You can't grant access to individual custom logs, but you can grant access to all custom logs. To create a role with access to all custom logs, create a custom role using the following actions: +> [!NOTE] +> Tables created by the [custom logs API](../essentials/../logs/custom-logs-overview.md) does not yet support table level RBAC. + + You can't grant access to individual custom logs tables, but you can grant access to all custom logs. 
To create a role with access to all custom log tables, create a custom role using the following actions: ``` "Actions": [ @@ -291,17 +330,18 @@ The examples above define a list of tables that are allowed. This example shows "Microsoft.OperationalInsights/workspaces/query/Tables.Custom/read" ], ``` -An alternative approach to manage access to custom logs is to assign them to an Azure resource and manage access using the resource-context paradigm. To use this method, you must include the resource ID by specifying it in the [x-ms-AzureResourceId](../logs/data-collector-api.md#request-headers) header when data is ingested to Log Analytics via the [HTTP Data Collector API](../logs/data-collector-api.md). The resource ID must be valid and have access rules applied to it. After the logs are ingested, they are accessible to those with read access to the resource, as explained here. -Sometimes custom logs come from sources that are not directly associated to a specific resource. In this case, create a resource group just to manage access to these logs. The resource group does not incur any cost, but gives you a valid resource ID to control access to the custom logs. For example, if a specific firewall is sending custom logs, create a resource group called "MyFireWallLogs" and make sure that the API requests contain the resource ID of "MyFireWallLogs". The firewall log records are then accessible only to users that were granted access to either MyFireWallLogs or those with full workspace access. +An alternative approach to manage access to custom logs is to assign them to an Azure resource and manage access using resource-context access control. Include the resource ID by specifying it in the [x-ms-AzureResourceId](../logs/data-collector-api.md#request-headers) header when data is ingested to Log Analytics via the [HTTP Data Collector API](../logs/data-collector-api.md). The resource ID must be valid and have access rules applied to it. After the logs are ingested, they are accessible to users with read access to the resource. + +Some custom logs come from sources that are not directly associated with a specific resource. In this case, create a resource group to manage access to these logs. The resource group does not incur any cost, but gives you a valid resource ID to control access to the custom logs. For example, if a specific firewall is sending custom logs, create a resource group called *MyFireWallLogs* and make sure that the API requests contain the resource ID of *MyFireWallLogs*. The firewall log records are then accessible only to users that were granted access to MyFireWallLogs or to users with full workspace access. ### Considerations -* If a user is granted global read permission with the standard Reader or Contributor roles that include the _\*/read_ action, it will override the per-table access control and give them access to all log data. -* If a user is granted per-table access but no other permissions, they would be able to access log data from the API but not from the Azure portal. To provide access from the Azure portal, use Log Analytics Reader as its base role. -* Administrators and owners of the subscription will have access to all data types regardless of any other permission settings. -* Workspace owners are treated like any other user for per-table access control. -* We recommend assigning roles to security groups instead of individual users to reduce the number of assignments. This will also help you use existing group management tools to configure and verify access.
+- If a user is granted global read permission with the standard Reader or Contributor roles that include the _\*/read_ action, it will override the per-table access control and give them access to all log data. +- If a user is granted per-table access but no other permissions, they would be able to access log data from the API but not from the Azure portal. To provide access from the Azure portal, use Log Analytics Reader as its base role. +- Administrators and owners of the subscription will have access to all data types regardless of any other permission settings. +- Workspace owners are treated like any other user for per-table access control. +- Assign roles to security groups instead of individual users to reduce the number of assignments. This will also help you use existing group management tools to configure and verify access. ## Next steps diff --git a/articles/azure-monitor/logs/oms-portal-transition.md b/articles/azure-monitor/logs/oms-portal-transition.md index 150fc050f1cb..d553dff5bb84 100644 --- a/articles/azure-monitor/logs/oms-portal-transition.md +++ b/articles/azure-monitor/logs/oms-portal-transition.md @@ -39,7 +39,7 @@ While most features will continue to work without performing any migration, you Refer to [Common questions for transition from OMS portal to Azure portal for Log Analytics users](../overview.md) for information about how to transition to the Azure portal. ## User access and role migration -Azure portal access management is richer and more powerful than the access management in the OMS Portal. See [Designing your Azure Monitor Logs workspace](../logs/design-logs-deployment.md) for details of access management in Log Analytics. +Azure portal access management is richer and more powerful than the access management in the OMS Portal. See [Designing your Azure Monitor Logs workspace](../logs/workspace-design.md) for details of access management in Log Analytics. > [!NOTE] > Previous versions of this article stated that the permissions would automatically be converted from the OMS portal to the Azure portal. This automatic conversion is no longer planned, and you must perform the conversion yourself. diff --git a/articles/azure-monitor/logs/tutorial-custom-logs-api.md b/articles/azure-monitor/logs/tutorial-custom-logs-api.md index 0bcc3dfac7a4..7dcc8216b5e7 100644 --- a/articles/azure-monitor/logs/tutorial-custom-logs-api.md +++ b/articles/azure-monitor/logs/tutorial-custom-logs-api.md @@ -26,7 +26,7 @@ In this tutorial, you learn to: ## Prerequisites To complete this tutorial, you need the following: -- Log Analytics workspace where you have at least [contributor rights](manage-access.md#manage-access-using-azure-permissions) . +- Log Analytics workspace where you have at least [contributor rights](manage-access.md#azure-rbac) . - [Permissions to create Data Collection Rule objects](../essentials/data-collection-rule-overview.md#permissions) in the workspace. ## Collect workspace details diff --git a/articles/azure-monitor/logs/tutorial-custom-logs.md b/articles/azure-monitor/logs/tutorial-custom-logs.md index 1c6ecca24bc4..3760defb4a42 100644 --- a/articles/azure-monitor/logs/tutorial-custom-logs.md +++ b/articles/azure-monitor/logs/tutorial-custom-logs.md @@ -23,7 +23,7 @@ In this tutorial, you learn to: ## Prerequisites To complete this tutorial, you need the following: -- Log Analytics workspace where you have at least [contributor rights](manage-access.md#manage-access-using-azure-permissions) . 
+- Log Analytics workspace where you have at least [contributor rights](manage-access.md#azure-rbac) . - [Permissions to create Data Collection Rule objects](../essentials/data-collection-rule-overview.md#permissions) in the workspace. diff --git a/articles/azure-monitor/logs/tutorial-ingestion-time-transformations-api.md b/articles/azure-monitor/logs/tutorial-ingestion-time-transformations-api.md index 8f194fd5766a..6007b4378045 100644 --- a/articles/azure-monitor/logs/tutorial-ingestion-time-transformations-api.md +++ b/articles/azure-monitor/logs/tutorial-ingestion-time-transformations-api.md @@ -27,8 +27,10 @@ In this tutorial, you learn to: ## Prerequisites To complete this tutorial, you need the following: -- Log Analytics workspace where you have at least [contributor rights](manage-access.md#manage-access-using-azure-permissions) . -- [Permissions to create Data Collection Rule objects](../essentials/data-collection-rule-overview.md#permissions) in the workspace. +- Log Analytics workspace where you have at least [contributor rights](manage-access.md#azure-rbac). +- [Permissions to create Data Collection Rule objects](../essentials/data-collection-rule-overview.md#permissions) in the workspace. +- The table must already have some data. +- The table can't be linked to the [workspace's transformation DCR](../essentials/data-collection-rule-overview.md#types-of-data-collection-rules). ## Overview of tutorial diff --git a/articles/azure-monitor/logs/tutorial-ingestion-time-transformations.md b/articles/azure-monitor/logs/tutorial-ingestion-time-transformations.md index b68fd47d77f9..ed371a98a4c1 100644 --- a/articles/azure-monitor/logs/tutorial-ingestion-time-transformations.md +++ b/articles/azure-monitor/logs/tutorial-ingestion-time-transformations.md @@ -23,8 +23,10 @@ In this tutorial, you learn to: ## Prerequisites To complete this tutorial, you need the following: -- Log Analytics workspace where you have at least [contributor rights](manage-access.md#manage-access-using-azure-permissions) . -- [Permissions to create Data Collection Rule objects](../essentials/data-collection-rule-overview.md#permissions) in the workspace. +- Log Analytics workspace where you have at least [contributor rights](manage-access.md#azure-rbac). +- [Permissions to create Data Collection Rule objects](../essentials/data-collection-rule-overview.md#permissions) in the workspace. +- The table must already have some data. +- The table can't be linked to the [workspace's transformation DCR](../essentials/data-collection-rule-overview.md#types-of-data-collection-rules). ## Overview of tutorial diff --git a/articles/azure-monitor/logs/service-providers.md b/articles/azure-monitor/logs/workspace-design-service-providers.md similarity index 95% rename from articles/azure-monitor/logs/service-providers.md rename to articles/azure-monitor/logs/workspace-design-service-providers.md index 89e418afb95b..5b5a0fce9cbc 100644 --- a/articles/azure-monitor/logs/service-providers.md +++ b/articles/azure-monitor/logs/workspace-design-service-providers.md @@ -8,7 +8,7 @@ ms.date: 02/03/2020 --- -# Azure Monitor Logs for Service Providers +# Log Analytics workspace design for service providers Log Analytics workspaces in Azure Monitor can help managed service providers (MSPs), large enterprises, independent software vendors (ISVs), and hosting service providers manage and monitor servers in customer's on-premises or cloud infrastructure. 
@@ -20,7 +20,7 @@ Log Analytics in Azure Monitor can also be used by a service provider managing c ## Architectures for Service Providers -Log Analytics workspaces provide a method for the administrator to control the flow and isolation of [log](../logs/data-platform-logs.md) data and create an architecture that addresses its specific business needs. [This article](../logs/design-logs-deployment.md) explains the design, deployment, and migration considerations for a workspace, and the [manage access](../logs/manage-access.md) article discusses how to apply and manage permissions to log data. Service providers have additional considerations. +Log Analytics workspaces provide a method for the administrator to control the flow and isolation of [log](../logs/data-platform-logs.md) data and create an architecture that addresses its specific business needs. [This article](../logs/workspace-design.md) explains the design, deployment, and migration considerations for a workspace, and the [manage access](../logs/manage-access.md) article discusses how to apply and manage permissions to log data. Service providers have additional considerations. There are three possible architectures for service providers regarding Log Analytics workspaces: diff --git a/articles/azure-monitor/logs/workspace-design.md b/articles/azure-monitor/logs/workspace-design.md new file mode 100644 index 000000000000..e4a2e11696fe --- /dev/null +++ b/articles/azure-monitor/logs/workspace-design.md @@ -0,0 +1,190 @@ +--- +title: Design a Log Analytics workspace architecture +description: Describes the considerations and recommendations for customers preparing to deploy a workspace in Azure Monitor. +ms.topic: conceptual +ms.date: 05/25/2022 + +--- + +# Design a Log Analytics workspace architecture +While a single [Log Analytics workspace](log-analytics-workspace-overview.md) may be sufficient for many environments using Azure Monitor and Microsoft Sentinel, many organizations will create multiple workspaces to optimize costs and better meet different business requirements. This article presents a set of criteria for determining whether to use a single workspace or multiple workspaces, and the configuration and placement of those workspaces, to meet your particular requirements while optimizing your costs. + +> [!NOTE] +> This article includes both Azure Monitor and Microsoft Sentinel since many customers need to consider both in their design, and most of the decision criteria apply to both. If you only use one of these services, then you can simply ignore the other in your evaluation. + +## Design strategy +Your design should always start with a single workspace since this reduces the complexity of managing multiple workspaces and querying data from them. There are no performance limitations from the amount of data in your workspace, and multiple services and data sources can send data to the same workspace. As you identify criteria to create additional workspaces, your design should use the fewest number that will match your particular requirements. + +Designing a workspace configuration includes evaluation of multiple criteria, some of which may be in conflict. For example, you may be able to reduce egress charges by creating a separate workspace in each Azure region, but consolidating into a single workspace might allow you to reduce charges even more with a commitment tier.
Evaluate each of the criteria below independently and consider your particular requirements and priorities in determining which design will be most effective for your particular environment. + + +## Design criteria +The following table briefly presents the criteria that you should consider in designing your workspace architecture. The sections below describe each of these criteria in full detail. + +| Criteria | Description | +|:---|:---| +| [Segregate operational and security data](#segregate-operational-and-security-data) | Many customers will create separate workspaces for their operational and security data because of data ownership and the additional cost of Microsoft Sentinel. In some cases though, you may be able to save cost by consolidating into a single workspace to qualify for a commitment tier. | +| [Azure tenants](#azure-tenants) | If you have multiple Azure tenants, you'll usually create a workspace in each because several data sources can only send monitoring data to a workspace in the same Azure tenant. | +| [Azure regions](#azure-regions) | Each workspace resides in a particular Azure region, and you may have regulatory or compliance requirements to store data in particular locations. | +| [Data ownership](#data-ownership) | You may choose to create separate workspaces to define data ownership, for example by subsidiaries or affiliated companies. | +| [Split billing](#split-billing) | By placing workspaces in separate subscriptions, they can be billed to different parties. | +| [Data retention and archive](#data-retention-and-archive) | You can set different retention settings for each table in a workspace, but you need a separate workspace if you require different retention settings for different resources that send data to the same tables. | +| [Commitment tiers](#commitment-tiers) | Commitment tiers allow you to reduce your ingestion cost by committing to a minimum amount of daily data in a single workspace. | +| [Legacy agent limitations](#legacy-agent-limitations) | Legacy virtual machine agents have limitations on the number of workspaces they can connect to. | +| [Data access control](#data-access-control) | Configure access to the workspace and to different tables and data from different resources. | + +### Segregate operational and security data +Most customers who use both Azure Monitor and Microsoft Sentinel will create a dedicated workspace for each to segregate ownership of data between your operational and security teams and also to optimize costs. If Microsoft Sentinel is enabled in a workspace, then all data in that workspace is subject to Sentinel pricing, even if it's operational data collected by Azure Monitor. While a workspace with Sentinel gets 3 months of free data retention instead of 31 days, this will typically result in higher cost for operational data than in a workspace without Sentinel. See [Azure Monitor Logs pricing details](cost-logs.md#workspaces-with-microsoft-sentinel). + +The exception is if combining data in the same workspace helps you reach a [commitment tier](#commitment-tiers), which provides a discount to your ingestion charges. For example, consider an organization that has operational data and security data each ingesting about 50 GB per day. Combining the data in the same workspace would allow a commitment tier at 100 GB per day that would provide a 15% discount for Azure Monitor and 50% discount for Sentinel. + +If you create separate workspaces for other criteria, then you'll usually create additional workspace pairs.
For example, if you have two Azure tenants, you may create four workspaces - an operational and security workspace in each tenant. + + +- **If you use both Azure Monitor and Microsoft Sentinel**, create a separate workspace for each. Consider combining the two if it helps you reach a commitment tier. + + +### Azure tenants +Most resources can only send monitoring data to a workspace in the same Azure tenant. Virtual machines using the [Azure Monitor agent](../agents/azure-monitor-agent-overview.md) or the [Log Analytics agents](../agents/log-analytics-agent.md) can send data to workspaces in separate Azure tenants, which may be a scenario that you consider as a [service provider](#multiple-tenant-strategies). + +- **If you have a single Azure tenant**, then create a single workspace for that tenant. +- **If you have multiple Azure tenants**, then create a workspace for each tenant. See [Multiple tenant strategies](#multiple-tenant-strategies) for other options including strategies for service providers. + +### Azure regions +Log Analytics workspaces each reside in a [particular Azure region](https://azure.microsoft.com/global-infrastructure/geographies/), and you may have regulatory or compliance requirements for keeping data in a particular region. For example, an international company might locate a workspace in each major geographical region, such as the United States and Europe. + +- **If you have requirements for keeping data in a particular geography**, create a separate workspace for each region with such requirements. +- **If you do not have requirements for keeping data in a particular geography**, use a single workspace for all regions. + +You should also consider potential [bandwidth charges](https://azure.microsoft.com/pricing/details/bandwidth/) that may apply when sending data to a workspace from a resource in another region, although these charges are usually minor relative to data ingestion costs for most customers. These charges will typically result from sending data to the workspace from a virtual machine. Monitoring data from other Azure resources using [diagnostic settings](../essentials/diagnostic-settings.md) does not [incur egress charges](../usage-estimated-costs.md#data-transfer-charges). + +Use the [Azure pricing calculator](https://azure.microsoft.com/pricing/calculator) to estimate the cost and determine which regions you actually need. Consider workspaces in multiple regions if bandwidth charges are significant. + + +- **If bandwidth charges are significant enough to justify the additional complexity**, create a separate workspace for each region with virtual machines. +- **If bandwidth charges are not significant enough to justify the additional complexity**, use a single workspace for all regions. + + +### Data ownership +You may have a requirement to segregate data or define boundaries based on ownership. For example, you may have different subsidiaries or affiliated companies that require delineation of their monitoring data. + +- **If you require data segregation**, use a separate workspace for each data owner. +- **If you do not require data segregation**, use a single workspace for all data owners. + +### Split billing +You may need to split billing between different parties or perform charge back to a customer or internal business unit. [Azure Cost Management + Billing](../usage-estimated-costs.md#azure-cost-management--billing) allows you to view charges by workspace.
You can also use a log query to view [billable data volume by Azure resource, resource group, or subscription](analyze-usage.md#data-volume-by-azure-resource-resource-group-or-subscription), which may be sufficient for your billing requirements. + +- **If you do not need to split billing or perform charge back**, use a single workspace for all cost owners. +- **If you need to split billing or perform charge back**, consider whether [Azure Cost Management + Billing](../usage-estimated-costs.md#azure-cost-management--billing) or a log query provides granular enough cost reporting for your requirements. If not, use a separate workspace for each cost owner. + +### Data retention and archive +You can configure default [data retention and archive settings](data-retention-archive.md) for a workspace or [configure different settings for each table](data-retention-archive.md#set-retention-and-archive-policy-by-table). You may require different settings for different sets of data in a particular table. If this is the case, then you would need to separate that data into different workspaces, each with unique retention settings. + +- **If you can use the same retention and archive settings for all data in each table**, use a single workspace for all resources. +- **If you require different retention and archive settings for different resources in the same table**, use a separate workspace for different resources. + + + +### Commitment tiers +[Commitment tiers](../logs/cost-logs.md#commitment-tiers) provide a discount to your workspace ingestion costs when you commit to a particular amount of daily data. You may choose to consolidate data in a single workspace in order to reach the level of a particular tier. This same volume of data spread across multiple workspaces would not be eligible for the same tier, unless you have a dedicated cluster. + +If you can commit to daily ingestion of at least 500 GB/day, then you should implement a [dedicated cluster](../logs/cost-logs.md#dedicated-clusters), which provides additional functionality and performance. Dedicated clusters also allow you to combine the data from multiple workspaces in the cluster to reach the level of a commitment tier. + +- **If you will ingest at least 500 GB/day across all resources**, create a dedicated cluster and set the appropriate commitment tier. +- **If you will ingest at least 100 GB/day across resources**, consider combining them into a single workspace to take advantage of a commitment tier. + + + +### Legacy agent limitations +While you should avoid sending duplicate data to multiple workspaces because of the additional charges, you may have virtual machines connected to multiple workspaces. The most common scenario is an agent connected to separate workspaces for Azure Monitor and Microsoft Sentinel. + + The [Azure Monitor agent](../agents/azure-monitor-agent-overview.md) and [Log Analytics agent for Windows](../agents/log-analytics-agent.md) can connect to multiple workspaces. The [Log Analytics agent for Linux](../agents/log-analytics-agent.md), though, can only connect to a single workspace. + +- **If you use the Log Analytics agent for Linux**, migrate to the [Azure Monitor agent](../agents/azure-monitor-agent-overview.md) or ensure that your Linux machines only require access to a single workspace. + + +### Data access control +When you grant a user [access to a workspace](manage-access.md#azure-rbac), they have access to all data in that workspace.
### Legacy agent limitations
While you should avoid sending duplicate data to multiple workspaces because of the additional charges, you may have virtual machines connected to multiple workspaces. The most common scenario is an agent connected to separate workspaces for Azure Monitor and Microsoft Sentinel. +
The [Azure Monitor agent](../agents/azure-monitor-agent-overview.md) and [Log Analytics agent for Windows](../agents/log-analytics-agent.md) can connect to multiple workspaces. However, the [Log Analytics agent for Linux](../agents/log-analytics-agent.md) can only connect to a single workspace. +
- **If you use the Log Analytics agent for Linux**, migrate to the [Azure Monitor agent](../agents/azure-monitor-agent-overview.md) or ensure that your Linux machines only require access to a single workspace. + +
### Data access control
When you grant a user [access to a workspace](manage-access.md#azure-rbac), they have access to all data in that workspace. This is appropriate for a member of a central administration or security team who must access data for all resources. Access to the workspace is also determined by resource-context RBAC and table-level RBAC. +
[Resource-context RBAC](manage-access.md#access-mode)
By default, if a user has read access to an Azure resource, they inherit permissions to any of that resource's monitoring data sent to the workspace. This allows users to access information about resources they manage without being granted explicit access to the workspace. If you need to block this access, you can change the [access control mode](manage-access.md#access-control-mode) to require explicit workspace permissions. +
- **If you want users to be able to access data for their resources**, keep the default access control mode of *Use resource or workspace permissions*.
- **If you want to explicitly assign permissions for all users**, change the access control mode to *Require workspace permissions*. + +
[Table-level RBAC](manage-access.md#table-level-azure-rbac)
With table-level RBAC, you can grant or deny access to specific tables in the workspace. This allows you to implement granular permissions required for specific situations in your environment. +
For example, you might grant an internal auditing team access to only specific tables collected by Sentinel. Or you might deny access to security-related tables to resource owners who need operational data related to their resources. +
- **If you don't require granular access control by table**, grant the operations and security team access to their resources and allow resource owners to use resource-context RBAC for their resources.
- **If you require granular access control by table**, grant or deny access to specific tables using table-level RBAC, as in the hedged custom role sketch below.
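As a concrete illustration of the table-level option, the sketch below defines a custom Azure role that can query only a single table. The role name, subscription ID, and the choice of the *Heartbeat* table are assumptions made for the example; the action strings follow the pattern described in the manage access article.

```azurepowershell-interactive
# Hypothetical example: a custom role that can read workspace metadata and query only the Heartbeat table.
# The role name, subscription ID, and table choice are placeholders for illustration.
$subscriptionId = "00000000-0000-0000-0000-000000000000"

$role = Get-AzRoleDefinition -Name "Reader"   # clone an existing role object to use as a template
$role.Id = $null
$role.Name = "Log Analytics Heartbeat Table Reader (example)"
$role.Description = "Can run queries against the Heartbeat table only."
$role.Actions.Clear()
$role.Actions.Add("Microsoft.OperationalInsights/workspaces/read")
$role.Actions.Add("Microsoft.OperationalInsights/workspaces/query/read")
$role.Actions.Add("Microsoft.OperationalInsights/workspaces/query/Heartbeat/read")
$role.NotActions.Clear()
$role.AssignableScopes.Clear()
$role.AssignableScopes.Add("/subscriptions/$subscriptionId")

# Register the custom role definition in the subscription.
New-AzRoleDefinition -Role $role
```

You would then assign the custom role to the relevant group at the workspace scope with a normal role assignment.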
## Working with multiple workspaces
Since many designs will include multiple workspaces, Azure Monitor and Microsoft Sentinel include features to assist you in analyzing this data across workspaces. For details, see the following: +
- [Create a log query across multiple workspaces and apps in Azure Monitor](cross-workspace-query.md)
- [Extend Microsoft Sentinel across workspaces and tenants](../../sentinel/extend-sentinel-across-workspaces-tenants.md).
## Multiple tenant strategies
Environments with multiple Azure tenants, including service providers (MSPs), independent software vendors (ISVs), and large enterprises, often require a strategy where a central administration team has access to administer workspaces located in other tenants. Each of the tenants may represent separate customers or different business units. +
> [!NOTE]
> For partners and service providers who are part of the [Cloud Solution Provider (CSP) program](https://partner.microsoft.com/membership/cloud-solution-provider), Log Analytics in Azure Monitor is one of the Azure services available in Azure CSP subscriptions. +
There are two basic strategies for this functionality as described below. +
### Distributed architecture
In a distributed architecture, a Log Analytics workspace is created in each Azure tenant. This is the only option you can use if you're monitoring Azure services other than virtual machines. +
There are two options to allow service provider administrators to access the workspaces in the customer tenants. + + 
- Use [Azure Lighthouse](../../lighthouse/overview.md) to access each customer tenant. The service provider administrators are included in an Azure AD user group in the service provider’s tenant, and this group is granted access during the onboarding process for each customer. The administrators can then access each customer’s workspaces from within their own service provider tenant, rather than having to log into each customer’s tenant individually. For more information, see [Monitor customer resources at scale](../../lighthouse/how-to/monitor-at-scale.md). +
- Add individual users from the service provider as [Azure Active Directory guest users (B2B)](../../active-directory/external-identities/what-is-b2b.md). The customer tenant administrators manage individual access for each service provider administrator, and the service provider administrators must log in to the directory for each tenant in the Azure portal to be able to access these workspaces. + +
Advantages of this strategy are: +
- Logs can be collected from all types of resources.
- The customer can confirm specific levels of permissions with [Azure delegated resource management](../../lighthouse/concepts/architecture.md), or can manage access to the logs using their own [Azure role-based access control (Azure RBAC)](../../role-based-access-control/overview.md).
- Each customer can have different settings for their workspace such as retention and data cap.
- Isolation between customers for regulatory and compliance purposes.
- The charge for each workspace is included in the bill for the customer's subscription. +
Disadvantages of this strategy are: +
- Centrally visualizing and analyzing data across customer tenants with tools such as Azure Monitor Workbooks can result in slower experiences, especially when analyzing data across more than 50 workspaces.
- If customers are not onboarded for Azure delegated resource management, service provider administrators must be provisioned in the customer directory. This makes it more difficult for the service provider to manage a large number of customer tenants at once.
### Centralized
A single workspace is created in the service provider's subscription. This option can only collect data from customer virtual machines. Agents installed on the virtual machines are configured to send their logs to this central workspace. +
Advantages of this strategy are: +
- Easy to manage a large number of customers.
- The service provider has full ownership over the logs and the various artifacts such as functions and saved queries.
- The service provider can perform analytics across all of its customers. +
Disadvantages of this strategy are: +
- Logs can only be collected from virtual machines with an agent. It will not work with PaaS, SaaS, or Azure fabric data sources.
- It may be difficult to separate data between customers, since their data shares a single workspace. Queries need to use the computer's fully qualified domain name (FQDN) or the Azure subscription ID.
- All data from all customers will be stored in the same region with a single bill and the same retention and configuration settings. + +
### Hybrid
In a hybrid model, each tenant has its own workspace, and some mechanism is used to pull data into a central location for reporting and analytics. This data could include a small number of data types or a summary of the activity such as daily statistics. +
There are two options to implement logs in a central location: +
- Central workspace.
The service provider creates a workspace in its tenant and uses a script that utilizes the [Query API](api/overview.md) with the [custom logs API](custom-logs-overview.md) to bring the data from the tenant workspaces to this central location. Another option is to use [Azure Logic Apps](../../logic-apps/logic-apps-overview.md) to copy data to the central workspace. + +- Power BI. The tenant workspaces export data to Power BI using the integration between the [Log Analytics workspace and Power BI](log-powerbi.md). + + +## Next steps + +- [Learn more about designing and configuring data access in a workspace.](manage-access.md) +- [Get sample workspace architectures for Microsoft Sentinel.](../../sentinel/sample-workspace-designs.md) diff --git a/articles/azure-monitor/toc.yml b/articles/azure-monitor/toc.yml index b8c4435eec5b..9d6a65b2d256 100644 --- a/articles/azure-monitor/toc.yml +++ b/articles/azure-monitor/toc.yml @@ -142,21 +142,6 @@ items: - name: Logs displayName: Azure Monitor Logs, Logs, Log Analytics, log query, log queries, query, queries href: logs/data-platform-logs.md - - name: Workspaces - items: - - name: Overview - href: logs/log-analytics-workspace-overview.md - - name: Design a workspace deployment - href: logs/design-logs-deployment.md - - name: Design for service providers - href: logs/service-providers.md - - name: Dedicated clusters - href: logs/logs-dedicated-clusters.md - - name: Availability zones - href: logs/availability-zones.md - - name: Log data ingestion time - displayName: latency - href: logs/data-ingestion-time.md - name: Usage and cost items: - name: View and estimate charges @@ -173,8 +158,6 @@ items: href: roles-permissions-security.md - name: Log data href: logs/data-security.md - - name: Azure AD Authentication for Logs - href: logs/azure-ad-authentication-logs.md - name: Customer-managed keys href: logs/customer-managed-keys.md - name: Private Link networking @@ -195,12 +178,8 @@ items: items: - name: Overview href: alerts/alerts-overview.md - - name: Metric alerts - href: alerts/alerts-metric-overview.md - - name: Log alerts - href: alerts/alerts-unified-log.md - - name: Activity log alerts - href: alerts/activity-log-alerts.md + - name: Alert Types + href: alerts/alerts-types.md - name: Partner integrations href: partners.md - name: Security @@ -298,6 +277,8 @@ items: href: logs/daily-cap.md - name: Configure Basic Logs href: logs/basic-logs-configure.md + - name: Use Azure AD authentication + href: logs/azure-ad-authentication-logs.md - name: Archive and restore items: - name: Set retention and archive policy @@ -306,12 +287,23 @@ items: href: logs/search-jobs.md - name: Restore logs href: logs/restore.md + - name: Design + items: + - name: Design a workspace architecture + href: logs/workspace-design.md + - name: Dedicated clusters + href: logs/logs-dedicated-clusters.md + - name: Availability zones + href: logs/availability-zones.md - name: Monitor items: - name: Monitor a workspace href: logs/monitor-workspace.md - name: Analyze usage and cost href: logs/analyze-usage.md + - name: Log data ingestion time + displayName: latency + href: logs/data-ingestion-time.md - name: Move and delete items: - name: Move a workspace @@ -730,7 +722,7 @@ items: href: alerts/action-groups-logic-app.md - name: Alert user interface items: - - name: Manage alert instances + - name: View and manage alert instances href: alerts/alerts-managing-alert-instances.md - name: Troubleshoot alerts href: alerts/alerts-troubleshoot.md diff --git
a/articles/azure-monitor/usage-estimated-costs.md b/articles/azure-monitor/usage-estimated-costs.md index 8354d5089616..5f8483806ad1 100644 --- a/articles/azure-monitor/usage-estimated-costs.md +++ b/articles/azure-monitor/usage-estimated-costs.md @@ -20,8 +20,8 @@ Several other features don't have a direct cost, but you instead pay for the ing |:---|:---| | Logs | Ingestion, retention, and export of data in Log Analytics workspaces and legacy Application insights resources. This will typically be the bulk of Azure Monitor charges for most customers. There is no charge for querying this data except in the case of [Basic Logs](logs/basic-logs-configure.md) or [Archived Logs](logs/data-retention-archive.md).

    Charges for Logs can vary significantly on the configuration that you choose. See [Azure Monitor Logs pricing details](logs/cost-logs.md) for details on how charges for Logs data are calculated and the different pricing tiers available. | | Platform Logs | Processing of [diagnostic and auditing information](essentials/resource-logs.md) is charged for [certain services](essentials/resource-logs-categories.md#costs) when sent to destinations other than a Log Analytics workspace. There's no direct charge when this data is sent to a Log Analytics workspace, but there is a charge for the workspace data ingestion and collection. | -| Metrics | There is no charge for [standard metrics](essentials/metrics-supported.md) collected from Azure resources. There is a cost for cost for collecting [custom metrics](essentials/metrics-custom-overview.md) and for retrieving metrics from the [REST API](essentials/rest-api-walkthrough.md#retrieve-metric-values). | -| Alerts | Charged based on the type and number of [signals](alerts/alerts-overview.md#what-you-can-alert-on) used by the alert rule, its frequency, and the type of [notification](alerts/action-groups.md) used in response. For [log alerts](alerts/alerts-unified-log.md) configured for [at scale monitoring](alerts/alerts-unified-log.md#split-by-alert-dimensions), the cost will also depend on the number of time series created by the dimensions resulting from your query. | +| Metrics | There is no charge for [standard metrics](essentials/metrics-supported.md) collected from Azure resources. There is a cost for collecting [custom metrics](essentials/metrics-custom-overview.md) and for retrieving metrics from the [REST API](essentials/rest-api-walkthrough.md#retrieve-metric-values). | +| Alerts | Charged based on the type and number of signals used by the alert rule, its frequency, and the type of [notification](alerts/action-groups.md) used in response. For [Log alerts](alerts/alerts-types.md#log-alerts) configured for [at scale monitoring](alerts/alerts-types.md#splitting-by-dimensions-in-log-alert-rules), the cost will also depend on the number of time series created by the dimensions resulting from your query. | | Web tests | There is a cost for [standard web tests](app/availability-standard-tests.md) and [multi-step web tests](app/availability-multistep.md) in Application Insights. Multi-step web tests have been deprecated. ## Data transfer charges @@ -136,7 +136,7 @@ To view data allocation benefits from sources such as [Microsoft Defender for Se Customers who purchased Microsoft Operations Management Suite E1 and E2 are eligible for per-node data ingestion entitlements for Log Analytics and Application Insights. Each Application Insights node includes up to 200 MB of data ingested per day (separate from Log Analytics data ingestion), with 90-day data retention at no extra cost. -To receive these entitlements for Log Analytics workspaces or Application Insights resources in a subscription, they must be use the Per-Node (OMS) pricing tier. This entitlement isn't visible in the estimated costs shown in the Usage and estimated cost pane. +To receive these entitlements for Log Analytics workspaces or Application Insights resources in a subscription, they must use the Per-Node (OMS) pricing tier. This entitlement isn't visible in the estimated costs shown in the Usage and estimated cost pane. 
Depending on the number of nodes of the suite that your organization purchased, moving some subscriptions into a Per GB (pay-as-you-go) pricing tier might be advantageous, but this requires careful consideration. diff --git a/articles/azure-monitor/visualize/view-designer.md b/articles/azure-monitor/visualize/view-designer.md index f36b783953fa..89cd26dd51f0 100644 --- a/articles/azure-monitor/visualize/view-designer.md +++ b/articles/azure-monitor/visualize/view-designer.md @@ -36,7 +36,7 @@ The views that you create with View Designer contain the elements that are descr | Visualization parts | Present a visualization of data in the Log Analytics workspace based on one or more [log queries](../logs/log-query-overview.md). Most parts include a header, which provides a high-level visualization, and a list, which displays the top results. Each part type provides a different visualization of the records in the Log Analytics workspace. You select elements in the part to perform a log query that provides detailed records. | ## Required permissions -You require at least [contributor level permissions](../logs/manage-access.md#manage-access-using-azure-permissions) in the Log Analytics workspace to create or modify views. If you don't have this permission, then the View Designer option won't be displayed in the menu. +You require at least [contributor level permissions](../logs/manage-access.md#azure-rbac) in the Log Analytics workspace to create or modify views. If you don't have this permission, then the View Designer option won't be displayed in the menu. ## Work with an existing view diff --git a/articles/azure-monitor/vm/monitor-virtual-machine-configure.md b/articles/azure-monitor/vm/monitor-virtual-machine-configure.md index 17cba6b88317..5e29c22bb8e4 100644 --- a/articles/azure-monitor/vm/monitor-virtual-machine-configure.md +++ b/articles/azure-monitor/vm/monitor-virtual-machine-configure.md @@ -42,7 +42,7 @@ You require at least one Log Analytics workspace to support VM insights and to c Many environments use a single workspace for all their virtual machines and other Azure resources they monitor. You can even share a workspace used by [Microsoft Defender for Cloud and Microsoft Sentinel](monitor-virtual-machine-security.md), although many customers choose to segregate their availability and performance telemetry from security data. If you're getting started with Azure Monitor, start with a single workspace and consider creating more workspaces as your requirements evolve. -For complete details on logic that you should consider for designing a workspace configuration, see [Designing your Azure Monitor Logs deployment](../logs/design-logs-deployment.md). +For complete details on logic that you should consider for designing a workspace configuration, see [Design a Log Analytics workspace configuration](../logs/workspace-design.md). ### Multihoming agents Multihoming refers to a virtual machine that connects to multiple workspaces. Typically, there's little reason to multihome agents for Azure Monitor alone. Having an agent send data to multiple workspaces most likely creates duplicate data in each workspace, which increases your overall cost. You can combine data from multiple workspaces by using [cross-workspace queries](../logs/cross-workspace-query.md) and [workbooks](../visualizations/../visualize/workbooks-overview.md).
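If agents do end up multihomed, or data is simply spread across workspaces, a cross-workspace query can combine it again at query time. The sketch below is an illustrative example rather than a prescribed query: both workspace IDs are placeholder GUIDs, and *Heartbeat* is used only because it exists in most workspaces that collect from virtual machines.

```azurepowershell-interactive
# Hypothetical example: count heartbeats per computer across two workspaces in a single query.
# Both IDs are placeholder workspace (customer) ID GUIDs.
$primaryWorkspaceId = "11111111-1111-1111-1111-111111111111"
$otherWorkspaceId   = "22222222-2222-2222-2222-222222222222"

$query = @"
union Heartbeat, workspace("$otherWorkspaceId").Heartbeat
| where TimeGenerated > ago(1h)
| summarize HeartbeatCount = count() by Computer, TenantId
| sort by HeartbeatCount desc
"@

# TenantId identifies which workspace each record came from.
(Invoke-AzOperationalInsightsQuery -WorkspaceId $primaryWorkspaceId -Query $query).Results |
    Format-Table -AutoSize
```

The `workspace()` expression also accepts a workspace name, qualified name, or Azure resource ID, as described in the cross-workspace query article.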
diff --git a/articles/azure-monitor/vm/vminsights-configure-workspace.md b/articles/azure-monitor/vm/vminsights-configure-workspace.md index 130afaf6504f..91b1c5cde142 100644 --- a/articles/azure-monitor/vm/vminsights-configure-workspace.md +++ b/articles/azure-monitor/vm/vminsights-configure-workspace.md @@ -30,7 +30,7 @@ Access Log Analytics workspaces in the Azure portal from the **Log Analytics wor [![Log Anlytics workspaces](media/vminsights-configure-workspace/log-analytics-workspaces.png)](media/vminsights-configure-workspace/log-analytics-workspaces.png#lightbox) -You can create a new Log Analytics workspace using any of the following methods. See [Designing your Azure Monitor Logs deployment](../logs/design-logs-deployment.md) for guidance on determining the number of workspaces you should use in your environment and how to design their access strategy. +You can create a new Log Analytics workspace using any of the following methods. See [Design a Log Analytics workspace configuration](../logs/workspace-design.md) for guidance on determining the number of workspaces you should use in your environment and how to design their access strategy. * [Azure portal](../logs/quick-create-workspace.md) @@ -46,7 +46,7 @@ VM insights supports a Log Analytics workspace in any of the [regions supported >You can monitor Azure VMs in any region. The VMs themselves aren't limited to the regions supported by the Log Analytics workspace. ## Azure role-based access control -To enable and access the features in VM insights, you must have the [Log Analytics contributor role](../logs/manage-access.md#manage-access-using-azure-permissions) in the workspace. To view performance, health, and map data, you must have the [monitoring reader role](../roles-permissions-security.md#built-in-monitoring-roles) for the Azure VM. For more information about how to control access to a Log Analytics workspace, see [Manage workspaces](../logs/manage-access.md). +To enable and access the features in VM insights, you must have the [Log Analytics contributor role](../logs/manage-access.md#azure-rbac) in the workspace. To view performance, health, and map data, you must have the [monitoring reader role](../roles-permissions-security.md#built-in-monitoring-roles) for the Azure VM. For more information about how to control access to a Log Analytics workspace, see [Manage workspaces](../logs/manage-access.md). ## Add VMInsights solution to workspace Before a Log Analytics workspace can be used with VM insights, it must have the *VMInsights* solution installed. The methods for configuring the workspace are described in the following sections. diff --git a/articles/azure-monitor/vm/vminsights-health-alerts.md index 98c5b2c5fd84..5365b64e1eec 100644 --- a/articles/azure-monitor/vm/vminsights-health-alerts.md +++ b/articles/azure-monitor/vm/vminsights-health-alerts.md @@ -33,7 +33,7 @@ An [Azure alert](../alerts/alerts-overview.md) will be created for each virtual If an alert is already in **Fired** state when the virtual machine state changes, then a second alert won't be created, but the severity of the same alert will be changed to match the state of the virtual machine. For example, if the virtual machine changes to **Critical** state when a **Warning** alert was already in **Fired** state, that alert's severity will be changed to **Sev1**.
If the virtual machine changes to a **Warning** state when a **Sev1** alert was already in **Fired** state, that alert's severity will be changed to **Sev2**. If the virtual machine moves back to a **Healthy** state, then the alert will be resolved with severity changed to **Sev4**. ## Viewing alerts -View alerts created by VM insights guest health with other [alerts in the Azure portal](../alerts/alerts-overview.md#alerts-experience). You can select **Alerts** from the **Azure Monitor** menu to view alerts for all monitored resources, or select **Alerts** from a virtual machine's menu to view alerts for just that virtual machine. +View alerts created by VM insights guest health with other [alerts in the Azure portal](../alerts/alerts-page.md). You can select **Alerts** from the **Azure Monitor** menu to view alerts for all monitored resources, or select **Alerts** from a virtual machine's menu to view alerts for just that virtual machine. ## Alert properties diff --git a/articles/azure-netapp-files/azacsnap-release-notes.md b/articles/azure-netapp-files/azacsnap-release-notes.md index de71c9ab049f..49e83acdee75 100644 --- a/articles/azure-netapp-files/azacsnap-release-notes.md +++ b/articles/azure-netapp-files/azacsnap-release-notes.md @@ -12,7 +12,7 @@ ms.service: azure-netapp-files ms.workload: storage ms.tgt_pltfrm: na ms.topic: conceptual -ms.date: 03/08/2022 +ms.date: 05/24/2022 ms.author: phjensen --- @@ -20,6 +20,23 @@ ms.author: phjensen This page lists major changes made to AzAcSnap to provide new functionality or resolve defects. +## May-2022 + +### AzAcSnap v5.0.3 (Build: 20220524.14204) - Patch update to v5.0.2 + +AzAcSnap v5.0.3 (Build: 20220524.14204) is provided as a patch update to the v5.0 branch with the following fix: + +- Fix for handling delimited identifiers when querying SAP HANA. This issue only impacted SAP HANA in an HSR-HA configuration where a secondary node is configured with 'logreplay_readaccess', and it has been resolved. + +Download the [latest release](https://aka.ms/azacsnapinstaller) of the installer and review how to [get started](azacsnap-get-started.md). + +### AzAcSnap v5.1 Preview (Build: 20220524.15550) + +AzAcSnap v5.1 Preview (Build: 20220524.15550) is an updated build to extend the preview expiry date for 90 days. This update contains the fix for handling delimited identifiers when querying SAP HANA as provided in v5.0.3. + +Read about the [AzAcSnap Preview](azacsnap-preview.md). +Download the [latest release of the Preview installer](https://aka.ms/azacsnap-preview-installer). + ## Mar-2022 ### AzAcSnap v5.1 Preview (Build: 20220302.81795) @@ -29,11 +46,6 @@ AzAcSnap v5.1 Preview (Build: 20220302.81795) has been released with the followi - Azure Key Vault support for securely storing the Service Principal. - A new option for `-c backup --volume` which has the `all` parameter value. -Details of these new features are in the AzAcSnap Preview documentation. - -Read about the new features and how to use the [AzAcSnap Preview](azacsnap-preview.md). -Download the [latest release of the Preview installer](https://aka.ms/azacsnap-preview-installer). - ## Feb-2022 ### AzAcSnap v5.1 Preview (Build: 20220220.55340) @@ -69,8 +81,6 @@ AzAcSnap v5.0.2 (Build: 20210827.19086) is provided as a patch update to the v5. - Fix the installer's check for the location of the hdbuserstore. The installer would check for the existence of an incorrect source directory for the hdbuserstore for the user running the install - this is fixed to check for `~/.hdb`.
This fix is applicable to systems (for example, Azure Large Instance) where the hdbuserstore was pre-configured for the `root` user before installing `azacsnap`. - Installer now shows the version it will install/extract (if the installer is run without any arguments). -Download the [latest release](https://aka.ms/azacsnapinstaller) of the installer and review how to [get started](azacsnap-get-started.md). - ## May-2021 ### AzAcSnap v5.0.1 (Build: 20210524.14837) - Patch update to v5.0 diff --git a/articles/azure-netapp-files/create-active-directory-connections.md b/articles/azure-netapp-files/create-active-directory-connections.md index e6d9635963b7..d8956a0b0ca4 100644 --- a/articles/azure-netapp-files/create-active-directory-connections.md +++ b/articles/azure-netapp-files/create-active-directory-connections.md @@ -12,7 +12,7 @@ ms.service: azure-netapp-files ms.workload: storage ms.tgt_pltfrm: na ms.topic: how-to -ms.date: 04/29/2022 +ms.date: 05/24/2022 ms.author: anfdocs --- # Create and manage Active Directory connections for Azure NetApp Files @@ -198,30 +198,11 @@ This setting is configured in the **Active Directory Connections** under **NetAp ![Active Directory AES encryption](../media/azure-netapp-files/active-directory-aes-encryption.png) - * **LDAP Signing** + * **LDAP Signing** Select this checkbox to enable LDAP signing. This functionality enables secure LDAP lookups between the Azure NetApp Files service and the user-specified [Active Directory Domain Services domain controllers](/windows/win32/ad/active-directory-domain-services). For more information, see [ADV190023 | Microsoft Guidance for Enabling LDAP Channel Binding and LDAP Signing](https://portal.msrc.microsoft.com/en-us/security-guidance/advisory/ADV190023). ![Active Directory LDAP signing](../media/azure-netapp-files/active-directory-ldap-signing.png) - The **LDAP Signing** feature is currently in preview. If this is your first time using this feature, register the feature before using it: - - ```azurepowershell-interactive - Register-AzProviderFeature -ProviderNamespace Microsoft.NetApp -FeatureName ANFLdapSigning - ``` - - Check the status of the feature registration: - - > [!NOTE] - > The **RegistrationState** may be in the `Registering` state for up to 60 minutes before changing to`Registered`. Wait until the status is `Registered` before continuing. - - ```azurepowershell-interactive - Get-AzProviderFeature -ProviderNamespace Microsoft.NetApp -FeatureName ANFLdapSigning - ``` - - You can also use [Azure CLI commands](/cli/azure/feature) `az feature register` and `az feature show` to register the feature and display the registration status. - - - * **LDAP over TLS** See [Enable Active Directory Domain Services (AD DS) LDAP authentication for NFS volumes](configure-ldap-over-tls.md) for information about this option. diff --git a/articles/azure-netapp-files/snapshots-manage-policy.md b/articles/azure-netapp-files/snapshots-manage-policy.md index 9df053af3245..133aefaca209 100644 --- a/articles/azure-netapp-files/snapshots-manage-policy.md +++ b/articles/azure-netapp-files/snapshots-manage-policy.md @@ -12,7 +12,7 @@ ms.service: azure-netapp-files ms.workload: storage ms.tgt_pltfrm: na ms.topic: how-to -ms.date: 01/05/2022 +ms.date: 05/25/2022 ms.author: anfdocs --- @@ -24,13 +24,13 @@ ms.author: anfdocs A snapshot policy enables you to specify the snapshot creation frequency in hourly, daily, weekly, or monthly cycles. You also need to specify the maximum number of snapshots to retain for the volume. -1. 
From the NetApp Account view, click **Snapshot policy**. +1. From the NetApp Account view, select **Snapshot policy**. ![Screenshot that shows how to navigate to Snapshot Policy.](../media/azure-netapp-files/snapshot-policy-navigation.png) 2. In the Snapshot Policy window, set Policy State to **Enabled**. -3. Click the **Hourly**, **Daily**, **Weekly**, or **Monthly** tab to create hourly, daily, weekly, or monthly snapshot policies. Specify the **Number of snapshots to keep**. +3. Select the **Hourly**, **Daily**, **Weekly**, or **Monthly** tab to create hourly, daily, weekly, or monthly snapshot policies. Specify the **Number of snapshots to keep**. > [!IMPORTANT] > For *monthly* snapshot policy definition, be sure to specify a day that will work for all intended months. If you intend for the monthly snapshot configuration to work for all months in the year, pick a day of the month between 1 and 28. For example, if you specify `31` (day of the month), the monthly snapshot configuration is skipped for the months that have less than 31 days. @@ -53,7 +53,7 @@ A snapshot policy enables you to specify the snapshot creation frequency in hour ![Screenshot that shows the monthly snapshot policy.](../media/azure-netapp-files/snapshot-policy-monthly.png) -4. Click **Save**. +4. Select **Save**. If you need to create additional snapshot policies, repeat Step 3. The policies you created appear in the Snapshot policy page. @@ -70,33 +70,37 @@ You cannot apply a snapshot policy to a destination volume in cross-region repli ![Screenshot that shows the Volumes right-click menu.](../media/azure-netapp-files/volume-right-cick-menu.png) -2. In the Edit window, under **Snapshot policy**, select a policy to use for the volume. Click **OK** to apply the policy. +2. In the Edit window, under **Snapshot policy**, select a policy to use for the volume. Select **OK** to apply the policy. ![Screenshot that shows the Snapshot policy menu.](../media/azure-netapp-files/snapshot-policy-edit.png) ## Modify a snapshot policy -You can modify an existing snapshot policy to change the policy state, snapshot frequency (hourly, daily, weekly, or monthly), or number of snapshots to keep. +You can modify an existing snapshot policy to change the policy state, snapshot frequency (hourly, daily, weekly, or monthly), or number of snapshots to keep. + +When you modify a snapshot policy or disable its schedule, snapshots created with the old schedule are not deleted or overwritten. If you proceed with the update, you will have to delete the old snapshots manually. -1. From the NetApp Account view, click **Snapshot policy**. +1. From the NetApp Account view, select **Snapshot policy**. 2. Right-click the snapshot policy you want to modify, then select **Edit**. ![Screenshot that shows the Snapshot policy right-click menu.](../media/azure-netapp-files/snapshot-policy-right-click-menu.png) -3. Make the changes in the Snapshot Policy window that appears, then click **Save**. +3. Make the changes in the Snapshot Policy window that appears, then select **Save**. + +4. You will receive a prompt asking you to confirm that you want to update the Snapshot Policy. Select **Yes** to confirm your choice. ## Delete a snapshot policy You can delete a snapshot policy that you no longer want to keep. -1. From the NetApp Account view, click **Snapshot policy**. +1. From the NetApp Account view, select **Snapshot policy**. 2. Right-click the snapshot policy you want to modify, then select **Delete**.
![Screenshot that shows the Delete menu item.](../media/azure-netapp-files/snapshot-policy-right-click-menu.png) -3. Click **Yes** to confirm that you want to delete the snapshot policy. +3. Select **Yes** to confirm that you want to delete the snapshot policy. ![Screenshot that shows snapshot policy delete confirmation.](../media/azure-netapp-files/snapshot-policy-delete-confirm.png) diff --git a/articles/azure-netapp-files/whats-new.md b/articles/azure-netapp-files/whats-new.md index 90d1e52a4742..5bb63a501ae2 100644 --- a/articles/azure-netapp-files/whats-new.md +++ b/articles/azure-netapp-files/whats-new.md @@ -12,7 +12,7 @@ ms.service: azure-netapp-files ms.workload: storage ms.tgt_pltfrm: na ms.topic: overview -ms.date: 05/18/2022 +ms.date: 05/25/2022 ms.author: anfdocs --- @@ -22,6 +22,10 @@ Azure NetApp Files is updated regularly. This article provides a summary about t ## May 2022 +* [LDAP signing](create-active-directory-connections.md#ldap-signing) now generally available (GA) + + The LDAP signing feature is now generally available. You no longer need to register the feature before using it. + * [SMB Continuous Availability (CA) shares support for Citrix App Layering](enable-continuous-availability-existing-smb.md) (Preview) [Citrix App Layering](https://docs.citrix.com/en-us/citrix-app-layering/4.html) radically reduces the time it takes to manage Windows applications and images. App Layering separates the management of your OS and apps from your infrastructure. You can install each app and OS patch once, update the associated templates, and redeploy your images. You can publish layered images as open standard virtual disks, usable in any environment. App Layering can be used to provide dynamic access application layer virtual disks stored on SMB shared networked storage, including Azure NetApp Files. To enhance App Layering resiliency to events of storage service maintenance, Azure NetApp Files has extended support for [SMB Transparent Failover via SMB Continuous Availability (CA) shares on Azure NetApp Files](azure-netapp-files-create-volumes-smb.md#continuous-availability) for App Layering virtual disks. For more information, see [Azure NetApp Files Azure Virtual Desktop Infrastructure solutions | Citrix](azure-netapp-files-solution-architectures.md#citrix). diff --git a/articles/azure-relay/ip-firewall-virtual-networks.md b/articles/azure-relay/ip-firewall-virtual-networks.md index 283c78eb2d39..9f368a4fa240 100644 --- a/articles/azure-relay/ip-firewall-virtual-networks.md +++ b/articles/azure-relay/ip-firewall-virtual-networks.md @@ -11,10 +11,6 @@ By default, Relay namespaces are accessible from internet as long as the request This feature is helpful in scenarios in which Azure Relay should be only accessible from certain well-known sites. Firewall rules enable you to configure rules to accept traffic originating from specific IPv4 addresses. For example, if you use Relay with [Azure Express Route](../expressroute/expressroute-faqs.md#supported-services), you can create a **firewall rule** to allow traffic from only your on-premises infrastructure IP addresses. -> [!IMPORTANT] -> This feature is currently in preview. - - ## Enable IP firewall rules The IP firewall rules are applied at the namespace level. Therefore, the rules apply to all connections from clients using any supported protocol. Any connection attempt from an IP address that does not match an allowed IP rule on the namespace is rejected as unauthorized. The response does not mention the IP rule. 
IP filter rules are applied in order, and the first rule that matches the IP address determines the accept or reject action. diff --git a/articles/azure-resource-manager/bicep/toc.yml b/articles/azure-resource-manager/bicep/toc.yml index bd18e623a86c..0d43d7f1db3d 100644 --- a/articles/azure-resource-manager/bicep/toc.yml +++ b/articles/azure-resource-manager/bicep/toc.yml @@ -85,6 +85,8 @@ href: ../../container-instances/container-instances-quickstart-bicep.md?toc=/azure/azure-resource-manager/bicep/toc.json - name: Databases items: + - name: Cache for Redis + href: ../../azure-cache-for-redis/cache-redis-cache-bicep-provision.md?toc=/azure/azure-resource-manager/bicep/toc.json - name: Cosmos DB href: ../../cosmos-db/sql/quick-create-bicep.md?toc=/azure/azure-resource-manager/bicep/toc.json - name: Database for MariaDB @@ -161,6 +163,8 @@ href: ../../api-management/quickstart-bicep.md?toc=/azure/azure-resource-manager/bicep/toc.json - name: Notification Hubs href: ../../notification-hubs/create-notification-hub-bicep.md?toc=/azure/azure-resource-manager/bicep/toc.json + - name: Redis Web App + href: ../../azure-cache-for-redis/cache-web-app-bicep-with-redis-cache-provision.md?toc=/azure/azure-resource-manager/bicep/toc.json - name: SignalR Service href: ../../azure-signalr/signalr-quickstart-azure-signalr-service-bicep.md?toc=/azure/azure-resource-manager/bicep/toc.json - name: Concepts diff --git a/articles/azure-resource-manager/management/overview.md b/articles/azure-resource-manager/management/overview.md index 50ef80189a85..5391befe2123 100644 --- a/articles/azure-resource-manager/management/overview.md +++ b/articles/azure-resource-manager/management/overview.md @@ -2,7 +2,7 @@ title: Azure Resource Manager overview description: Describes how to use Azure Resource Manager for deployment, management, and access control of resources on Azure. ms.topic: overview -ms.date: 02/03/2022 +ms.date: 05/26/2022 ms.custom: contperf-fy21q1,contperf-fy21q3-portal --- # What is Azure Resource Manager? @@ -13,7 +13,7 @@ To learn about Azure Resource Manager templates (ARM templates), see the [ARM te ## Consistent management layer -When a user sends a request from any of the Azure tools, APIs, or SDKs, Resource Manager receives the request. It authenticates and authorizes the request. Resource Manager sends the request to the Azure service, which takes the requested action. Because all requests are handled through the same API, you see consistent results and capabilities in all the different tools. +When you send a request through any of the Azure APIs, tools, or SDKs, Resource Manager receives the request. It authenticates and authorizes the request before forwarding it to the appropriate Azure service. Because all requests are handled through the same API, you see consistent results and capabilities in all the different tools. The following image shows the role Azure Resource Manager plays in handling Azure requests. @@ -32,6 +32,8 @@ If you're new to Azure Resource Manager, there are some terms you might not be f * **ARM template** - A JavaScript Object Notation (JSON) file that defines one or more resources to deploy to a resource group, subscription, management group, or tenant. The template can be used to deploy the resources consistently and repeatedly. See [Template deployment overview](../templates/overview.md). * **Bicep file** - A file for declaratively deploying Azure resources. 
Bicep is a language that's been designed to provide the best authoring experience for infrastructure as code solutions in Azure. See [Bicep overview](../bicep/overview.md). +For more definitions of Azure terminology, see [Azure fundamental concepts](/azure/cloud-adoption-framework/ready/considerations/fundamental-concepts). + ## The benefits of using Resource Manager With Resource Manager, you can: diff --git a/articles/azure-resource-manager/management/resource-name-rules.md b/articles/azure-resource-manager/management/resource-name-rules.md index 2ad1f243e10c..67e845c594a8 100644 --- a/articles/azure-resource-manager/management/resource-name-rules.md +++ b/articles/azure-resource-manager/management/resource-name-rules.md @@ -4,7 +4,7 @@ description: Shows the rules and restrictions for naming Azure resources. ms.topic: conceptual author: tfitzmac ms.author: tomfitz -ms.date: 05/17/2022 +ms.date: 05/25/2022 --- # Naming rules and restrictions for Azure resources @@ -537,7 +537,7 @@ In the following tables, the term alphanumeric refers to: > [!div class="mx-tableFixed"] > | Entity | Scope | Length | Valid Characters | > | --- | --- | --- | --- | -> | mediaservices | resource group | 3-24 | Lowercase letters and numbers. | +> | mediaservices | Azure region | 3-24 | Lowercase letters and numbers. | > | mediaservices / liveEvents | Media service | 1-32 | Alphanumerics and hyphens.

    Start with alphanumeric. | > | mediaservices / liveEvents / liveOutputs | Live event | 1-256 | Alphanumerics and hyphens.

    Start with alphanumeric. | > | mediaservices / streamingEndpoints | Media service | 1-24 | Alphanumerics and hyphens.

    Start with alphanumeric. | @@ -818,14 +818,15 @@ In the following tables, the term alphanumeric refers to: > | --- | --- | --- | --- | > | certificates | resource group | 1-260 | Can't use:
    `/`

    Can't end with space or period. | > | serverfarms | resource group | 1-40 | Alphanumeric, hyphens and Unicode characters that can be mapped to Punycode | -> | sites / functions / slots | global or per domain. See note below. | 2-60 | Alphanumeric, hyphens and Unicode characters that can be mapped to Punycode

    Can't start or end with hyphen. | +> | sites | global or per domain. See note below. | 2-60 | Alphanumeric, hyphens and Unicode characters that can be mapped to Punycode

    Can't start or end with hyphen. | +> | sites / slots | site | 2-59 | Alphanumeric, hyphens and Unicode characters that can be mapped to Punycode | > [!NOTE] > A web site must have a globally unique URL. When you create a web site that uses a hosting plan, the URL is `http://.azurewebsites.net`. The app name must be globally unique. When you create a web site that uses an App Service Environment, the app name must be unique within the [domain for the App Service Environment](../../app-service/environment/using-an-ase.md#app-access). For both cases, the URL of the site is globally unique. > > Azure Functions has the same naming rules and restrictions as Microsoft.Web/sites. When generating the host ID, the function app name is truncated to 32 characters. This can cause host ID collision when a shared storage account is used. For more information, see [Host ID considerations](../../azure-functions/storage-considerations.md#host-id-considerations). > -> Unicode characters are parsed to Punycode using the following method: https://docs.microsoft.com/dotnet/api/system.globalization.idnmapping.getascii +> Unicode characters are parsed to Punycode using the [IdnMapping.GetAscii method](/dotnet/api/system.globalization.idnmapping.getascii) ## Next steps diff --git a/articles/azure-resource-manager/templates/best-practices.md b/articles/azure-resource-manager/templates/best-practices.md index b3122ac56e07..012947d0951f 100644 --- a/articles/azure-resource-manager/templates/best-practices.md +++ b/articles/azure-resource-manager/templates/best-practices.md @@ -2,7 +2,7 @@ title: Best practices for templates description: Describes recommended approaches for authoring Azure Resource Manager templates (ARM templates). Offers suggestions to avoid common problems when using templates. ms.topic: conceptual -ms.date: 04/23/2021 +ms.date: 05/26/2022 --- # ARM template best practices @@ -10,13 +10,13 @@ This article shows you how to use recommended practices when constructing your A ## Template limits -Limit the size of your template to 4 MB. The 4-MB limit applies to the final state of the template after it has been expanded with iterative resource definitions, and values for variables and parameters. The parameter file is also limited to 4 MB. You may get an error with a template or parameter file of less than 4 MB, if the total size of the request is too large. For more information about how to simplify your template to avoid a large request, see [Resolve errors for job size exceeded](error-job-size-exceeded.md). +Limit the size of your template to 4 MB. The 4-MB limit applies to the final state of the template after it has been expanded with iterative resource definitions, and values for variables and parameters. The parameter file is also limited to 4 MB. You may get an error with a template or parameter file of less than 4 MB if the total size of the request is too large. For more information about how to simplify your template to avoid a large request, see [Resolve errors for job size exceeded](error-job-size-exceeded.md). You're also limited to: * 256 parameters * 256 variables -* 800 resources (including copy count) +* 800 resources (including [copy count](copy-resources.md)) * 64 output values * 24,576 characters in a template expression @@ -164,7 +164,7 @@ When deciding what [dependencies](./resource-dependency.md) to set, use the foll * Set a child resource as dependent on its parent resource. 
-* Resources with the [condition element](conditional-resource-deployment.md) set to false are automatically removed from the dependency order. Set the dependencies as if the resource is always deployed. +* Resources with the [condition element](conditional-resource-deployment.md) set to `false` are automatically removed from the dependency order. Set the dependencies as if the resource is always deployed. * Let dependencies cascade without setting them explicitly. For example, your virtual machine depends on a virtual network interface, and the virtual network interface depends on a virtual network and public IP addresses. Therefore, the virtual machine is deployed after all three resources, but don't explicitly set the virtual machine as dependent on all three resources. This approach clarifies the dependency order and makes it easier to change the template later. @@ -222,15 +222,14 @@ The following information can be helpful when you work with [resources](./syntax } ``` -* Assign public IP addresses to a virtual machine only when an application requires it. To connect to a virtual machine (VM) for debugging, or for management or administrative purposes, use inbound NAT rules, a virtual network gateway, or a jumpbox. +* Assign public IP addresses to a virtual machine only when an application requires it. To connect to a virtual machine for administrative purposes, use inbound NAT rules, a virtual network gateway, or a jumpbox. For more information about connecting to virtual machines, see: - * [Run VMs for an N-tier architecture in Azure](/azure/architecture/reference-architectures/n-tier/n-tier-sql-server) - * [Set up WinRM access for VMs in Azure Resource Manager](../../virtual-machines/windows/winrm.md) - * [Allow external access to your VM by using the Azure portal](../../virtual-machines/windows/nsg-quickstart-portal.md) - * [Allow external access to your VM by using PowerShell](../../virtual-machines/windows/nsg-quickstart-powershell.md) - * [Allow external access to your Linux VM by using Azure CLI](../../virtual-machines/linux/nsg-quickstart.md) + * [What is Azure Bastion?](../../bastion/bastion-overview.md) + * [How to connect and sign on to an Azure virtual machine running Windows](../../virtual-machines/windows/connect-logon.md) + * [Setting up WinRM access for Virtual Machines in Azure Resource Manager](../../virtual-machines/windows/winrm.md) + * [Connect to a Linux VM](../../virtual-machines/linux-vm-connect.md) * The `domainNameLabel` property for public IP addresses must be unique. The `domainNameLabel` value must be between 3 and 63 characters long, and follow the rules specified by this regular expression: `^[a-z][a-z0-9-]{1,61}[a-z0-9]$`. Because the `uniqueString` function generates a string that is 13 characters long, the `dnsPrefixString` parameter is limited to 50 characters. diff --git a/articles/azure-resource-manager/templates/deploy-what-if.md b/articles/azure-resource-manager/templates/deploy-what-if.md index ac7107e1dbfc..9e84ddf56fd1 100644 --- a/articles/azure-resource-manager/templates/deploy-what-if.md +++ b/articles/azure-resource-manager/templates/deploy-what-if.md @@ -390,6 +390,7 @@ You can use the what-if operation through the Azure SDKs. ## Next steps +- [ARM Deployment Insights](https://marketplace.visualstudio.com/items?itemName=AuthorityPartnersInc.arm-deployment-insights) extension provides an easy way to integrate the what-if operation in your Azure DevOps pipeline. 
- To use the what-if operation in a pipeline, see [Test ARM templates with What-If in a pipeline](https://4bes.nl/2021/03/06/test-arm-templates-with-what-if/). - If you notice incorrect results from the what-if operation, please report the issues at [https://aka.ms/whatifissues](https://aka.ms/whatifissues). - For a Microsoft Learn module that covers using what if, see [Preview changes and validate Azure resources by using what-if and the ARM template test toolkit](/learn/modules/arm-template-test/). diff --git a/articles/azure-resource-manager/templates/overview.md b/articles/azure-resource-manager/templates/overview.md index 09a59e7e9679..e05485f55f3d 100644 --- a/articles/azure-resource-manager/templates/overview.md +++ b/articles/azure-resource-manager/templates/overview.md @@ -2,7 +2,7 @@ title: Templates overview description: Describes the benefits using Azure Resource Manager templates (ARM templates) for deployment of resources. ms.topic: conceptual -ms.date: 12/01/2021 +ms.date: 05/26/2022 --- # What are ARM templates? diff --git a/articles/azure-resource-manager/templates/template-spec-convert.md b/articles/azure-resource-manager/templates/template-spec-convert.md index a384410d44c8..1b5189514764 100644 --- a/articles/azure-resource-manager/templates/template-spec-convert.md +++ b/articles/azure-resource-manager/templates/template-spec-convert.md @@ -2,7 +2,7 @@ title: Convert portal template to template spec description: Describes how to convert an existing template in the Azure portal gallery to a template specs. ms.topic: conceptual -ms.date: 02/04/2021 +ms.date: 05/25/2022 ms.author: tomfitz author: tfitzmac --- @@ -12,6 +12,10 @@ The Azure portal provides a way to store Azure Resource Manager templates (ARM t To see if you have any templates to convert, view the [template gallery in the portal](https://portal.azure.com/#blade/HubsExtension/BrowseResourceBlade/resourceType/Microsoft.Gallery%2Fmyareas%2Fgalleryitems). These templates have the resource type `Microsoft.Gallery/myareas/galleryitems`. +## Deprecation of portal feature + +**The template gallery in the portal is being deprecated on March 31, 2025**. To continue using a template in the template gallery, you need to migrate it to a template spec. Use one of the methods shown in this article to migrate the template. + ## Convert with PowerShell script To simplify converting templates in the template gallery, use a PowerShell script from the Azure Quickstart Templates repo. When you run the script, you can either create a new template spec for each template or download a template that creates the template spec. The script doesn't delete the template from the template gallery. diff --git a/articles/azure-signalr/signalr-howto-diagnostic-logs.md b/articles/azure-signalr/signalr-howto-diagnostic-logs.md index b1c322d46a11..efee03781e62 100644 --- a/articles/azure-signalr/signalr-howto-diagnostic-logs.md +++ b/articles/azure-signalr/signalr-howto-diagnostic-logs.md @@ -213,9 +213,9 @@ If you find that you can't establish SignalR client connections to Azure SignalR When encountering message related problem, you can take advantage of messaging logs to troubleshoot. Firstly, [enable resource logs](#enable-resource-logs) in service, logs for server and client. > [!NOTE] -> For ASP.NET Core, see [here](https://docs.microsoft.com/aspnet/core/signalr/diagnostics) to enable logging in server and client. +> For ASP.NET Core, see [here](/aspnet/core/signalr/diagnostics) to enable logging in server and client. 
> -> For ASP.NET, see [here](https://docs.microsoft.com/aspnet/signalr/overview/testing-and-debugging/enabling-signalr-tracing) to enable logging in server and client. +> For ASP.NET, see [here](/aspnet/signalr/overview/testing-and-debugging/enabling-signalr-tracing) to enable logging in server and client. If you don't mind potential performance impact and no client-to-server direction message, check the `Messaging` in `Log Source Settings/Types` to enable *collect-all* log collecting behavior. For more information about this behavior, see [collect all section](#collect-all). @@ -239,7 +239,7 @@ For **collect all** collecting behavior: SignalR service only trace messages in direction **from server to client via SignalR service**. The tracing ID will be generated in server, the message will carry the tracing ID to SignalR service. > [!NOTE] -> If you want to trace message and [send messages from outside a hub](https://docs.microsoft.com/aspnet/core/signalr/hubcontext) in your app server, you need to enable **collect all** collecting behavior to collect message logs for the messages which are not originated from diagnostic clients. +> If you want to trace message and [send messages from outside a hub](/aspnet/core/signalr/hubcontext) in your app server, you need to enable **collect all** collecting behavior to collect message logs for the messages which are not originated from diagnostic clients. > Diagnostic clients works for both **collect all** and **collect partially** collecting behaviors. It has higher priority to collect logs. For more information, see [diagnostic client section](#diagnostic-client). By checking the sign in server and service side, you can easily find out whether the message is sent from server, arrives at SignalR service, and leaves from SignalR service. Basically, by checking if the *received* and *sent* message are matched or not based on message tracing ID, you can tell whether the message loss issue is in server or SignalR service in this direction. For more information, see the [details](#message-flow-detail-for-path3) below. diff --git a/articles/azure-video-indexer/concepts-overview.md b/articles/azure-video-indexer/concepts-overview.md index ee680d7a7fd6..47ac3dfd58ff 100644 --- a/articles/azure-video-indexer/concepts-overview.md +++ b/articles/azure-video-indexer/concepts-overview.md @@ -42,10 +42,6 @@ The confidence score indicates the confidence in an insight. It is a number betw Use textual and visual content moderation models to keep your users safe from inappropriate content and validate that the content you publish matches your organization's values. You can automatically block certain videos or alert your users about the content. For more information, see [Insights: visual and textual content moderation](video-indexer-output-json-v2.md#visualcontentmoderation). -## Blocks - -Blocks are meant to make it easier to go through the data. For example, block might be broken down based on when speakers change or there is a long pause. - ## Project and editor The [Azure Video Indexer](https://www.videoindexer.ai/) website enables you to use your video's deep insights to: find the right media content, locate the parts that you’re interested in, and use the results to create an entirely new project. Once created, the project can be rendered and downloaded from Azure Video Indexer and be used in your own editing applications or downstream workflows. 
diff --git a/articles/azure-video-indexer/deploy-with-arm-template.md b/articles/azure-video-indexer/deploy-with-arm-template.md index dd8443ced8e0..e1a248589591 100644 --- a/articles/azure-video-indexer/deploy-with-arm-template.md +++ b/articles/azure-video-indexer/deploy-with-arm-template.md @@ -2,7 +2,7 @@ title: Deploy Azure Video Indexer with ARM template description: In this tutorial you will create an Azure Video Indexer account by using Azure Resource Manager (ARM) template. ms.topic: tutorial -ms.date: 12/01/2021 +ms.date: 05/23/2022 ms.author: juliako --- @@ -28,13 +28,13 @@ The resource will be deployed to your subscription and will create the Azure Vid ### Option 1: Click the "Deploy To Azure Button", and fill in the missing parameters -[![Deploy to Azure](https://aka.ms/deploytoazurebutton)](https://portal.azure.com/#create/Microsoft.Template/uri/https%3A%2F%2Fraw.githubusercontent.com%2FAzure-Samples%2Fmedia-services-video-indexer%2Fmaster%2FARM-Samples%2FCreate-Account%2Favam.template.json) +[![Deploy to Azure](https://aka.ms/deploytoazurebutton)](https://portal.azure.com/#create/Microsoft.Template/uri/https%3A%2F%2Fraw.githubusercontent.com%2FAzure-Samples%2Fmedia-services-video-indexer%2Fmaster%2FARM-Quick-Start%2Favam.template.json) ---- ### Option 2 : Deploy using PowerShell Script -1. Open The [template file](https://github.com/Azure-Samples/media-services-video-indexer/blob/master/ARM-Samples/Create-Account/avam.template.json) file and inspect its content. +1. Open The [template file](https://github.com/Azure-Samples/media-services-video-indexer/blob/master/ARM-Quick-Start/avam.template.json) file and inspect its content. 2. Fill in the required parameters (see below) 3. Run the Following PowerShell commands: @@ -94,7 +94,7 @@ The resource will be deployed to your subscription and will create the Azure Vid If you're new to Azure Video Indexer, see: -* [Azure Video Indexer Documentation](/azure/azure-video-indexer) +* [Azure Video Indexer Documentation](./index.yml) * [Azure Video Indexer Developer Portal](https://api-portal.videoindexer.ai/) * After completing this tutorial, head to other Azure Video Indexer samples, described on [README.md](https://github.com/Azure-Samples/media-services-video-indexer/blob/master/README.md) diff --git a/articles/azure-video-indexer/monitor-video-indexer-data-reference.md b/articles/azure-video-indexer/monitor-video-indexer-data-reference.md index b5bc077244d9..5f4b15953b75 100644 --- a/articles/azure-video-indexer/monitor-video-indexer-data-reference.md +++ b/articles/azure-video-indexer/monitor-video-indexer-data-reference.md @@ -41,7 +41,7 @@ Azure Video Indexer currently does not support any monitoring on metrics. --------------**OPTION 2 EXAMPLE** ------------- - @@ -163,7 +163,7 @@ This section refers to all of the Azure Monitor Logs Kusto tables relevant to Az @@ -229,7 +229,7 @@ The following table lists the operations related to Azure Video Indexer that may -For more information on the schema of Activity Log entries, see [Activity Log schema](/azure/azure-monitor/essentials/activity-log-schema). +For more information on the schema of Activity Log entries, see [Activity Log schema](../azure-monitor/essentials/activity-log-schema.md). ## Schemas @@ -269,4 +269,4 @@ The following schemas are in use by Azure Video Indexer - See [Monitoring Azure Azure Video Indexer](monitor-video-indexer.md) for a description of monitoring Azure Video Indexer. 
-- See [Monitoring Azure resources with Azure Monitor](/azure/azure-monitor/essentials/monitor-azure-resource) for details on monitoring Azure resources. +- See [Monitoring Azure resources with Azure Monitor](../azure-monitor/essentials/monitor-azure-resource.md) for details on monitoring Azure resources. \ No newline at end of file diff --git a/articles/azure-video-indexer/monitor-video-indexer.md b/articles/azure-video-indexer/monitor-video-indexer.md index 6147e8934bbe..f76efef9c7be 100644 --- a/articles/azure-video-indexer/monitor-video-indexer.md +++ b/articles/azure-video-indexer/monitor-video-indexer.md @@ -25,7 +25,7 @@ Keep the headings in this order. When you have critical applications and business processes relying on Azure resources, you want to monitor those resources for their availability, performance, and operation. -This article describes the monitoring data generated by Azure Video Indexer. Azure Video Indexer uses [Azure Monitor](/azure/azure-monitor/overview). If you are unfamiliar with the features of Azure Monitor common to all Azure services that use it, read [Monitoring Azure resources with Azure Monitor](/azure/azure-monitor/essentials/monitor-azure-resource). +This article describes the monitoring data generated by Azure Video Indexer. Azure Video Indexer uses [Azure Monitor](../azure-monitor/overview.md). If you are unfamiliar with the features of Azure Monitor common to all Azure services that use it, read [Monitoring Azure resources with Azure Monitor](../azure-monitor/essentials/monitor-azure-resource.md). @@ -51,7 +51,7 @@ Some services in Azure have a special focused pre-built monitoring dashboard in ## Monitoring data -Azure Video Indexer collects the same kinds of monitoring data as other Azure resources that are described in [Monitoring data from Azure resources](/azure/azure-monitor/essentials/monitor-azure-resource#monitoring-data-from-Azure-resources). +Azure Video Indexer collects the same kinds of monitoring data as other Azure resources that are described in [Monitoring data from Azure resources](../azure-monitor/essentials/monitor-azure-resource.md#monitoring-data-from-azure-resources). See [Monitoring *Azure Video Indexer* data reference](monitor-video-indexer-data-reference.md) for detailed information on the metrics and logs metrics created by Azure Video Indexer. @@ -86,7 +86,7 @@ Currently Azure Video Indexer does not support monitoring of metrics. - @@ -106,9 +106,9 @@ If you don't support resource logs, say so. Some services may be only onboarded Data in Azure Monitor Logs is stored in tables where each table has its own set of unique properties. -All resource logs in Azure Monitor have the same fields followed by service-specific fields. The common schema is outlined in [Azure Monitor resource log schema](/azure/azure-monitor/essentials/resource-logs-schema) The schema for Azure Video Indexer resource logs is found in the [Azure Video Indexer Data Reference](monitor-video-indexer-data-reference.md#schemas) +All resource logs in Azure Monitor have the same fields followed by service-specific fields. The common schema is outlined in [Azure Monitor resource log schema](../azure-monitor/essentials/resource-logs-schema.md) The schema for Azure Video Indexer resource logs is found in the [Azure Video Indexer Data Reference](monitor-video-indexer-data-reference.md#schemas) -The [Activity log](/azure/azure-monitor/essentials/activity-log) is a type of platform sign-in Azure that provides insight into subscription-level events. 
You can view it independently or route it to Azure Monitor Logs, where you can do much more complex queries using Log Analytics. +The [Activity log](../azure-monitor/essentials/activity-log.md) is a type of platform sign-in Azure that provides insight into subscription-level events. You can view it independently or route it to Azure Monitor Logs, where you can do much more complex queries using Log Analytics. For a list of the types of resource logs collected for Azure Video Indexer, see [Monitoring Azure Video Indexer data reference](monitor-video-indexer-data-reference.md#resource-logs) @@ -122,7 +122,7 @@ For a list of the tables used by Azure Monitor Logs and queryable by Log Analyti > [!IMPORTANT] -> When you select **Logs** from the Azure Video Indexer account menu, Log Analytics is opened with the query scope set to the current Azure Video Indexer account. This means that log queries will only include data from that resource. If you want to run a query that includes data from other Azure Video Indexer account or data from other Azure services, select **Logs** from the **Azure Monitor** menu. See [Log query scope and time range in Azure Monitor Log Analytics](/azure/azure-monitor/logs/scope) for details. +> When you select **Logs** from the Azure Video Indexer account menu, Log Analytics is opened with the query scope set to the current Azure Video Indexer account. This means that log queries will only include data from that resource. If you want to run a query that includes data from other Azure Video Indexer account or data from other Azure services, select **Logs** from the **Azure Monitor** menu. See [Log query scope and time range in Azure Monitor Log Analytics](../azure-monitor/logs/scope.md) for details. @@ -151,10 +151,10 @@ VIAudit This information is the BIGGEST request we get in Azure Monitor so do not avoid it long term. People don't know what to monitor for best results. Be prescriptive --> -Azure Monitor alerts proactively notify you when important conditions are found in your monitoring data. They allow you to identify and address issues in your system before your customers notice them. You can set alerts on [metrics](/azure/azure-monitor/alerts/alerts-metric-overview), [logs](/azure/azure-monitor/alerts/alerts-unified-log), and the [activity log](/azure/azure-monitor/alerts/activity-log-alerts). Different types of alerts have benefits and drawbacks. +Azure Monitor alerts proactively notify you when important conditions are found in your monitoring data. They allow you to identify and address issues in your system before your customers notice them. You can set alerts on [metrics](../azure-monitor/alerts/alerts-metric-overview.md), [logs](../azure-monitor/alerts/alerts-unified-log.md), and the [activity log](../azure-monitor/alerts/activity-log-alerts.md). Different types of alerts have benefits and drawbacks. - The following table lists common and recommended alert rules for Azure Video Indexer. @@ -176,4 +176,4 @@ VIAudit - See [Monitoring Azure Video Indexer data reference](monitor-video-indexer-data-reference.md) for a reference of the metrics, logs, and other important values created by Azure Video Indexer account. -- See [Monitoring Azure resources with Azure Monitor](/azure/azure-monitor/essentials/monitor-azure-resource) for details on monitoring Azure resources. +- See [Monitoring Azure resources with Azure Monitor](../azure-monitor/essentials/monitor-azure-resource.md) for details on monitoring Azure resources. 
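As a practical complement to the monitoring and alerting guidance above, the following is a minimal Azure CLI sketch of routing Azure Video Indexer resource logs to a Log Analytics workspace so they can be queried (for example, against the `VIAudit` table) and used in log alert rules. The resource IDs, the diagnostic-setting name, and the `Audit` log category are placeholders and assumptions; confirm the categories your account actually emits in the data reference article before relying on them.

```console
# Route Azure Video Indexer resource logs to a Log Analytics workspace (all names below are placeholders).
az monitor diagnostic-settings create \
    --name "vi-logs-to-workspace" \
    --resource "/subscriptions/<subscription-id>/resourceGroups/<resource-group>/providers/Microsoft.VideoIndexer/accounts/<account-name>" \
    --workspace "/subscriptions/<subscription-id>/resourceGroups/<resource-group>/providers/Microsoft.OperationalInsights/workspaces/<workspace-name>" \
    --logs '[{"category": "Audit", "enabled": true}]'
```

Once the logs are flowing, the same workspace can back the log alert rules discussed above.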
diff --git a/articles/azure-video-indexer/network-security.md b/articles/azure-video-indexer/network-security.md index 2fa208e0aaea..12708ded1105 100644 --- a/articles/azure-video-indexer/network-security.md +++ b/articles/azure-video-indexer/network-security.md @@ -8,7 +8,7 @@ ms.author: juliako # NSG service tags for Azure Video Indexer -Azure Video Indexer (formerly Video Analyzer for Media) is a service hosted on Azure. In some architecture cases the service needs to interact with other services in order to index video files (that is, a Storage Account) or when a customer orchestrates indexing jobs against our API endpoint using their own service hosted on Azure (i.e AKS, Web Apps, Logic Apps, Functions). Customers who would like to limit access to their resources on a network level can use [Network Security Groups with Service Tags](https://docs.microsoft.com/azure/virtual-network/service-tags-overview). A service tag represents a group of IP address prefixes from a given Azure service, in this case Azure Video Indexer. Microsoft manages the address prefixes grouped by the service tag and automatically updates the service tag as addresses change in our backend, minimizing the complexity of frequent updates to network security rules by the customer. +Azure Video Indexer (formerly Video Analyzer for Media) is a service hosted on Azure. In some architecture cases the service needs to interact with other services in order to index video files (that is, a Storage Account) or when a customer orchestrates indexing jobs against our API endpoint using their own service hosted on Azure (i.e AKS, Web Apps, Logic Apps, Functions). Customers who would like to limit access to their resources on a network level can use [Network Security Groups with Service Tags](../virtual-network/service-tags-overview.md). A service tag represents a group of IP address prefixes from a given Azure service, in this case Azure Video Indexer. Microsoft manages the address prefixes grouped by the service tag and automatically updates the service tag as addresses change in our backend, minimizing the complexity of frequent updates to network security rules by the customer. ## Get started with service tags @@ -34,7 +34,7 @@ This tag contains the IP addresses of Azure Video Indexer services for all regio ## Using Azure CLI -You can also use Azure CLI to create a new or update an existing NSG rule and add the **AzureVideoAnalyzerForMedia** service tag using the `--source-address-prefixes`. For a full list of CLI commands and parameters see [az network nsg](https://docs.microsoft.com/cli/azure/network/nsg/rule?view=azure-cli-latest) +You can also use Azure CLI to create a new or update an existing NSG rule and add the **AzureVideoAnalyzerForMedia** service tag using the `--source-address-prefixes`. For a full list of CLI commands and parameters see [az network nsg](/cli/azure/network/nsg/rule?view=azure-cli-latest) Example of a security rule using service tags. For more details, visit https://aka.ms/servicetags diff --git a/articles/azure-video-indexer/release-notes.md b/articles/azure-video-indexer/release-notes.md index 6e0ccead7ae4..08ccf1a919a5 100644 --- a/articles/azure-video-indexer/release-notes.md +++ b/articles/azure-video-indexer/release-notes.md @@ -45,7 +45,7 @@ var uploadRequestResult = await client.PostAsync($"{apiUrl}/{accountInfo.Loc ### Line breaking in transcripts -Improved line break logic to better split transcript into sentences. 
New editing capabilities are now available through the Azure Video Indexer portal, such as adding a new line and editing the line’s timestamp. +Improved line break logic to better split transcript into sentences. New editing capabilities are now available through the Azure Video Indexer portal, such as adding a new line and editing the line’s timestamp. For more information, see [Insert or remove transcript lines](edit-transcript-lines-portal.md). ### Azure Monitor integration diff --git a/articles/azure-video-indexer/upload-index-videos.md b/articles/azure-video-indexer/upload-index-videos.md index bbc492580b69..a22eb519ecdc 100644 --- a/articles/azure-video-indexer/upload-index-videos.md +++ b/articles/azure-video-indexer/upload-index-videos.md @@ -17,6 +17,8 @@ When you're creating an Azure Video Indexer account, you choose between: For more information about account types, see [Media Services pricing](https://azure.microsoft.com/pricing/details/azure/media-services/). +After you upload and index a video, you can use [Azure Video Indexer website](video-indexer-view-edit.md) or [Azure Video Indexer Developer Portal](video-indexer-use-apis.md) to see the insights of the video (see [Examine the Azure Video Indexer output](video-indexer-output-json-v2.md)). + When you're uploading videos by using the API, you have the following options: * Upload your video from a URL (preferred). @@ -25,7 +27,7 @@ When you're uploading videos by using the API, you have the following options: ## Supported file formats -For a list of file formats that you can use with Azure Video Indexer, see [Standard Encoder formats and codecs](/azure/azure/media-services/latest/encode-media-encoder-standard-formats-reference). +For a list of file formats that you can use with Azure Video Indexer, see [Standard Encoder formats and codecs](/azure/media-services/latest/encode-media-encoder-standard-formats-reference). ## Storage of video files diff --git a/articles/azure-video-indexer/video-indexer-get-started.md b/articles/azure-video-indexer/video-indexer-get-started.md index 60e565eecbe6..f1aa77a45f86 100644 --- a/articles/azure-video-indexer/video-indexer-get-started.md +++ b/articles/azure-video-indexer/video-indexer-get-started.md @@ -38,27 +38,29 @@ See the [input container/file formats](/azure/media-services/latest/encode-media > [!div class="mx-imgBorder"] > :::image type="content" source="./media/video-indexer-get-started/video-indexer-upload.png" alt-text="Upload"::: -1. Once your video has been uploaded, Azure Video Indexer starts indexing and analyzing the video. You see the progress. +1. Once your video has been uploaded, Azure Video Indexer starts indexing and analyzing the video. As a result a JSON output with insights is produced. + + You see the progress. > [!div class="mx-imgBorder"] - > :::image type="content" source="./media/video-indexer-get-started/progress.png" alt-text="Progress of the upload"::: + > :::image type="content" source="./media/video-indexer-get-started/progress.png" alt-text="Progress of the upload"::: + + The produced JSON output contains `Insights` and `SummarizedInsights` elements. We highly recommend using `Insights` and not using `SummarizedInsights` (which is present for backward compatibility). 1. Once Azure Video Indexer is done analyzing, you'll get an email with a link to your video and a short description of what was found in your video. For example: people, spoken and written words, topics, and named entities. 1. 
You can later find your video in the library list and perform different operations. For example: search, reindex, edit. > [!div class="mx-imgBorder"] > :::image type="content" source="./media/video-indexer-get-started/uploaded.png" alt-text="Uploaded the upload"::: -## Supported browsers +After you upload and index a video, you can continue using [Azure Video Indexer website](video-indexer-view-edit.md) or [Azure Video Indexer Developer Portal](video-indexer-use-apis.md) to see the insights of the video (see [Examine the Azure Video Indexer output](video-indexer-output-json-v2.md)). -For more information, see [supported browsers](video-indexer-overview.md#supported-browsers). +For more details, see [Upload and index videos](upload-index-videos.md). -## See also +To start using the APIs, see [use APIs](video-indexer-use-apis.md) -See [Upload and index videos](upload-index-videos.md) for more details. - -After you upload and index a video, you can start using [Azure Video Indexer website](video-indexer-view-edit.md) or [Azure Video Indexer Developer Portal](video-indexer-use-apis.md) to see the insights of the video. +## Supported browsers -[Start using APIs](video-indexer-use-apis.md) +For more information, see [supported browsers](video-indexer-overview.md#supported-browsers). ## Next steps diff --git a/articles/azure-video-indexer/video-indexer-output-json-v2.md b/articles/azure-video-indexer/video-indexer-output-json-v2.md index 918e74fc0d38..bbdb6ca025b2 100644 --- a/articles/azure-video-indexer/video-indexer-output-json-v2.md +++ b/articles/azure-video-indexer/video-indexer-output-json-v2.md @@ -5,7 +5,7 @@ services: azure-video-analyzer author: Juliako manager: femila ms.topic: article -ms.date: 11/16/2020 +ms.date: 05/19/2022 ms.author: juliako --- @@ -13,12 +13,14 @@ ms.author: juliako When a video is indexed, Azure Video Indexer produces the JSON content that contains details of the specified video insights. The insights include transcripts, optical character recognition elements (OCRs), faces, topics, blocks, and similar details. Each insight type includes instances of time ranges that show when the insight appears in the video. -You can visually examine the video's summarized insights by pressing the **Play** button on the video on the [Azure Video Indexer](https://www.videoindexer.ai/) website. +The produced JSON output contains `Insights` and `SummarizedInsights` elements. We highly recommend using `Insights` and not using `SummarizedInsights` (which is present for backward compatibility). -You can also use the Get Video Index API. If the response status is `OK`, you get a detailed JSON output as the response content. +To visually examine the video's insights, press the **Play** button on the video on the [Azure Video Indexer](https://www.videoindexer.ai/) website. ![Screenshot of the Insights tab in Azure Video Indexer.](./media/video-indexer-output-json/video-indexer-summarized-insights.png) +When indexing with an API and the response status is OK, you get a detailed JSON output as the response content. When calling the [Get Video Index](https://api-portal.videoindexer.ai/api-details#api=Operations&operation=Get-Video-Index) API, we recommend passing `&includeSummarizedInsights=false` to save time and reduce response length. + This article examines the Azure Video Indexer output (JSON content). For information about what features and insights are available to you, see [Azure Video Indexer insights](video-indexer-overview.md#video-insights). 
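As a hedged sketch of the API call mentioned above, the following shows how a Get Video Index request might look with `includeSummarizedInsights=false`. The location, account ID, video ID, and access token are placeholders, and the URL shape is assumed from the public API portal rather than taken from this article; check the API reference before using it.

```console
# Illustrative Get Video Index call that skips the summarized insights (all values are placeholders).
curl "https://api.videoindexer.ai/<location>/Accounts/<account-id>/Videos/<video-id>/Index?includeSummarizedInsights=false&accessToken=<access-token>"
```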
> [!NOTE] @@ -168,7 +170,7 @@ A face might have an ID, a name, a thumbnail, other metadata, and a list of its |`transcript`|The [transcript](#transcript) insight.| |`ocr`|The [OCR](#ocr) insight.| |`keywords`|The [keywords](#keywords) insight.| -|`blocks`|Might contain one or more [blocks](#blocks).| +|`transcripts`|Might contain one or more [transcript](#transcript).| |`faces/animatedCharacters`|The [faces/animatedCharacters](#facesanimatedcharacters) insight.| |`labels`|The [labels](#labels) insight.| |`shots`|The [shots](#shots) insight.| @@ -202,13 +204,6 @@ Example: } ``` -#### blocks - -Attribute | Description ----|--- -`id`|The ID of the block.| -`instances`|A list of time ranges for this block.| - #### transcript |Name|Description| @@ -664,7 +659,7 @@ Sentiments are aggregated by their `sentimentType` field (`Positive`, `Neutral`, #### visualContentModeration -The `visualContentModeration` block contains time ranges that Azure Video Indexer found to potentially have adult content. If `visualContentModeration` is empty, no adult content was identified. +The `visualContentModeration` transcript contains time ranges that Azure Video Indexer found to potentially have adult content. If `visualContentModeration` is empty, no adult content was identified. Videos that contain adult or racy content might be available for private view only. Users have the option to submit a request for a human review of the content. In that case, the `IsAdult` attribute will contain the result of the human review. diff --git a/articles/azure-vmware/azure-vmware-solution-platform-updates.md b/articles/azure-vmware/azure-vmware-solution-platform-updates.md index 4b8c56de0f13..b9dfa61ac3de 100644 --- a/articles/azure-vmware/azure-vmware-solution-platform-updates.md +++ b/articles/azure-vmware/azure-vmware-solution-platform-updates.md @@ -10,6 +10,14 @@ ms.date: 12/22/2021 Azure VMware Solution will apply important updates starting in March 2021. You'll receive a notification through Azure Service Health that includes the timeline of the maintenance. For more information, see [Host maintenance and lifecycle management](concepts-private-clouds-clusters.md#host-maintenance-and-lifecycle-management). +## May 23, 2022 + +All new Azure VMware Solution private clouds in regions (Germany West Central, Australia East, Central US and UK West), are now deployed with VMware vCenter Server version 7.0 Update 3c and ESXi version 7.0 Update 3c. + +Any existing private clouds in the previously mentioned regions will be upgraded to those versions. For more information, please see [VMware ESXi 7.0 Update 3c Release Notes](https://docs.vmware.com/en/VMware-vSphere/7.0/rn/vsphere-esxi-70u3c-release-notes.html) and [VMware vCenter Server 7.0 Update 3c Release Notes](https://docs.vmware.com/en/VMware-vSphere/7.0/rn/vsphere-vcenter-server-70u3c-release-notes.html). + +You'll receive a notification through Azure Service Health that includes the timeline of the upgrade. You can reschedule an upgrade as needed. This notification also provides details on the upgraded component, its effect on workloads, private cloud access, and other Azure services. + ## May 9, 2022 All new Azure VMware Solution private clouds in regions (France Central, Brazil South, Japan West, Australia Southeast, Canada East, East Asia, and Southeast Asia), are now deployed with VMware vCenter Server version 7.0 Update 3c and ESXi version 7.0 Update 3c. 
diff --git a/articles/azure-vmware/deploy-arc-for-azure-vmware-solution.md b/articles/azure-vmware/deploy-arc-for-azure-vmware-solution.md index 808060470626..81fd6c242723 100644 --- a/articles/azure-vmware/deploy-arc-for-azure-vmware-solution.md +++ b/articles/azure-vmware/deploy-arc-for-azure-vmware-solution.md @@ -64,7 +64,7 @@ az feature show --name AzureArcForAVS --namespace Microsoft.AVS Use the following steps to guide you through the process to onboard in Arc for Azure VMware Solution (Preview). -1. Sign into the jumpbox VM and extract the contents from the compressed file from the following [location](https://github.com/Azure/ArcOnAVS/releases/tag/v2.0.0). The extracted file contains the scripts to install the preview software. +1. Sign into the jumpbox VM and extract the contents from the compressed file from the following [location](https://github.com/Azure/ArcOnAVS/releases/latest). The extracted file contains the scripts to install the preview software. 1. Open the 'config_avs.json' file and populate all the variables. **Config JSON** @@ -492,4 +492,4 @@ Appendix 1 shows proxy URLs required by the Azure Arc-enabled private cloud. The **Additional URL resources** - [Google Container Registry](http://gcr.io/) -- [Red Hat Quay.io](http://quay.io/) \ No newline at end of file +- [Red Hat Quay.io](http://quay.io/) diff --git a/articles/azure-vmware/faq.yml b/articles/azure-vmware/faq.yml index 42c05ba7f91a..e9578a993883 100644 --- a/articles/azure-vmware/faq.yml +++ b/articles/azure-vmware/faq.yml @@ -6,7 +6,7 @@ metadata: ms.service: azure-vmware ms.custom: contperf-fy21q4 ms.date: 09/29/2021 -title: Common questions about Azure VMware Solution +title: Common questions about Azure VMware Solution summary: This article answers commonly asked questions about Azure VMware Solution. sections: diff --git a/articles/azure-vmware/includes/create-private-cloud-azure-portal-steps.md b/articles/azure-vmware/includes/create-private-cloud-azure-portal-steps.md index 5c30503b7bee..977d779c70cf 100644 --- a/articles/azure-vmware/includes/create-private-cloud-azure-portal-steps.md +++ b/articles/azure-vmware/includes/create-private-cloud-azure-portal-steps.md @@ -17,12 +17,14 @@ You can create an Azure VMware Solution private cloud using the Azure portal or 1. Sign in to the [Azure portal](https://portal.azure.com). -1. Select **Create a new resource**. +1. Select **Create a resource**. -1. In the **Search the Marketplace** text box, type `Azure VMware Solution` and select it from the results. +1. In the **Search services and marketplace** text box, type `Azure VMware Solution` and select it from the search results. 1. On the **Azure VMware Solution** window, select **Create**. +1. If you need more hosts, [request a host quota increase]( https://docs.microsoft.com/azure/azure-vmware/request-host-quota-azure-vmware-solution?WT.mc_id=Portal-VMCP). + 1. On the **Basics** tab, enter values for the fields and then select **Review + Create**. 
>[!TIP] diff --git a/articles/azure-vmware/includes/vmware-software-versions.md b/articles/azure-vmware/includes/vmware-software-versions.md index fa8a1fd9214f..96a15699cd4b 100644 --- a/articles/azure-vmware/includes/vmware-software-versions.md +++ b/articles/azure-vmware/includes/vmware-software-versions.md @@ -15,10 +15,10 @@ The VMware software versions used in new deployments of Azure VMware Solution pr | Software | Version | | :--- | :---: | -| vCenter | 6.7 U3p | -| ESXi | 6.7 P05 | +| vCenter | 7.0 U3c | +| ESXi | 7.0 U3c | | vSAN | 6.7 P05 | -| HCX | 4.2.2 | +| HCX | 4.2.2 | | NSX-T
    **NOTE:** NSX-T is the only supported version of NSX. | [[!INCLUDE [nsxt-version](nsxt-version.md)]](https://docs.vmware.com/en/VMware-NSX-T-Data-Center/3.1/rn/VMware-NSX-T-Data-Center-312-Release-Notes.html) | diff --git a/articles/azure-vmware/integrate-azure-native-services.md b/articles/azure-vmware/integrate-azure-native-services.md index e378ca9e41c8..c0a1a8dc08c5 100644 --- a/articles/azure-vmware/integrate-azure-native-services.md +++ b/articles/azure-vmware/integrate-azure-native-services.md @@ -60,7 +60,7 @@ You can configure the Log Analytics workspace with Microsoft Sentinel for alert If you are new to Azure or unfamiliar with any of the services previously mentioned, review the following articles: - [Automation account authentication overview](../automation/automation-security-overview.md) -- [Designing your Azure Monitor Logs deployment](../azure-monitor/logs/design-logs-deployment.md) and [Azure Monitor](../azure-monitor/overview.md) +- [Designing your Azure Monitor Logs deployment](../azure-monitor/logs/workspace-design.md) and [Azure Monitor](../azure-monitor/overview.md) - [Planning](../security-center/security-center-planning-and-operations-guide.md) and [Supported platforms](../security-center/security-center-os-coverage.md) for Microsoft Defender for Cloud - [Enable Azure Monitor for VMs overview](../azure-monitor/vm/vminsights-enable-overview.md) - [What is Azure Arc enabled servers?](../azure-arc/servers/overview.md) and [What is Azure Arc enabled Kubernetes?](../azure-arc/kubernetes/overview.md) @@ -133,7 +133,7 @@ Can collect data from different [sources to monitor and analyze](../azure-monito Monitor guest operating system performance to discover and map application dependencies for Azure VMware Solution or on-premises VMs. Your Log Analytics workspace in Azure Monitor enables log collection and performance counter collection using the Log Analytics agent or extensions. -1. [Design your Azure Monitor Logs deployment](../azure-monitor/logs/design-logs-deployment.md) +1. [Design your Azure Monitor Logs deployment](../azure-monitor/logs/workspace-design.md) 1. [Enable Azure Monitor for VMs overview](../azure-monitor/vm/vminsights-enable-overview.md) diff --git a/articles/azure-vmware/media/tutorial-create-private-cloud/create-private-cloud.png b/articles/azure-vmware/media/tutorial-create-private-cloud/create-private-cloud.png index 1031e9a8bcb7..f9546d782c2a 100644 Binary files a/articles/azure-vmware/media/tutorial-create-private-cloud/create-private-cloud.png and b/articles/azure-vmware/media/tutorial-create-private-cloud/create-private-cloud.png differ diff --git a/articles/backup/backup-azure-database-postgresql.md b/articles/backup/backup-azure-database-postgresql.md index 0608b619c962..56eff11048f9 100644 --- a/articles/backup/backup-azure-database-postgresql.md +++ b/articles/backup/backup-azure-database-postgresql.md @@ -10,7 +10,7 @@ ms.author: v-amallick # Azure Database for PostgreSQL backup with long-term retention -This article describes how to back up Azure Database for PostgreSQL server. Before you begin, review the [supported configurations, feature considerations and known limitations](https://docs.microsoft.com/azure/backup/backup-azure-database-postgresql-support-matrix) +This article describes how to back up Azure Database for PostgreSQL server. 
Before you begin, review the [supported configurations, feature considerations and known limitations](./backup-azure-database-postgresql-support-matrix.md) ## Configure backup on Azure PostgreSQL databases @@ -183,4 +183,4 @@ Azure Backup service creates a job for scheduled backups or if you trigger on-de ## Next steps -[Troubleshoot PostgreSQL database backup by using Azure Backup](backup-azure-database-postgresql-troubleshoot.md) +[Troubleshoot PostgreSQL database backup by using Azure Backup](backup-azure-database-postgresql-troubleshoot.md) \ No newline at end of file diff --git a/articles/backup/backup-azure-monitor-alert-faq.yml b/articles/backup/backup-azure-monitor-alert-faq.yml index eb29a92d69d6..ee45bb07a49a 100644 --- a/articles/backup/backup-azure-monitor-alert-faq.yml +++ b/articles/backup/backup-azure-monitor-alert-faq.yml @@ -35,7 +35,7 @@ sections: answer: | As per backup alerts: - - **Alert rule**: Refers to a user-created rule that specifies the condition on which an alert should be fired. [Learn more](../azure-monitor/alerts/alerts-overview.md#overview) + - **Alert rule**: Refers to a user-created rule that specifies the condition on which an alert should be fired. [Learn more](../azure-monitor/alerts/alerts-overview.md) - **Alert processing rule (earlier called Action rule)**: Refers to a user-created rule that specifies the notification channels a particular fired alert should be routed to. You can also use alert processing rules to suppress notifications for a period of time. [Learn more](../azure-monitor/alerts/alerts-action-rules.md?tabs=portal) - **Action group**: Refers to the notification channel (such as email, ITSM endpoint, logic app, webhook, and so on) that a fired alert can be routed to. [Learn more](../azure-monitor/alerts/action-groups.md) diff --git a/articles/backup/backup-instant-restore-capability.md b/articles/backup/backup-instant-restore-capability.md index 9ff88e2afade..7e19da141f84 100644 --- a/articles/backup/backup-instant-restore-capability.md +++ b/articles/backup/backup-instant-restore-capability.md @@ -115,6 +115,11 @@ In a scenario where a retention policy is set as “1”, you can find two snapshots - You clean up snapshots, which are past retention. - The garbage collector (GC) in the backend is under heavy load. +> [!NOTE] +> Azure Backup manages backups automatically. Azure Backup retains old snapshots because they're needed to maintain backup consistency. If you delete a snapshot manually, you might encounter backup consistency problems. +> If there are errors in your backup history, stop the backup with the retain data option and then resume the backup. +> Consider creating a **backup strategy** if you have a particular scenario (for example, a virtual machine with multiple disks that requires an oversized amount of space). You need to separately create a backup for the **VM with the OS disk** and a different backup for **the other disks**. + ### I don’t need Instant Restore functionality. Can it be disabled? Instant restore feature is enabled for everyone and can't be disabled. You can reduce the snapshot retention to a minimum of one day.
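If you do want to lower the snapshot retention, the following Azure CLI sketch shows one way to update a VM backup policy's instant restore retention. It assumes the exported policy JSON exposes an `instantRpRetentionRangeInDays` property and uses placeholder resource names; verify the property name and the allowed range against your own exported policy before applying it.

```console
# Export the current VM backup policy, edit instantRpRetentionRangeInDays (for example, to 1), then apply it.
az backup policy show --resource-group <resource-group> --vault-name <vault-name> --name <policy-name> > policy.json

# ...edit instantRpRetentionRangeInDays in policy.json...

az backup policy set --resource-group <resource-group> --vault-name <vault-name> --policy "$(cat policy.json)"
```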
diff --git a/articles/backup/backup-rbac-rs-vault.md b/articles/backup/backup-rbac-rs-vault.md index c853f7c4e0a5..dd5c2eee1288 100644 --- a/articles/backup/backup-rbac-rs-vault.md +++ b/articles/backup/backup-rbac-rs-vault.md @@ -111,12 +111,12 @@ The following table captures the Backup management actions and corresponding Azu | Validate before configuring backup | Backup Operator | Backup vault | | | | Disk Backup Reader | Disk to be backed up| | | Enable backup from backup vault | Backup Operator | Backup vault | | -| | Disk Backup Reader | Disk to be backed up | In addition, the backup vault MSI should be given [these permissions](/azure/backup/disk-backup-faq##what-are-the-permissions-used-by-azure-backup-during-backup-and-restore-operation-) | +| | Disk Backup Reader | Disk to be backed up | In addition, the backup vault MSI should be given [these permissions](./disk-backup-faq.yml) | | On demand backup of disk | Backup Operator | Backup vault | | | Validate before restoring a disk | Backup Operator | Backup vault | | | | Disk Restore Operator | Resource group where disks will be restored to | | | Restoring a disk | Backup Operator | Backup vault | | -| | Disk Restore Operator | Resource group where disks will be restored to | In addition, the backup vault MSI should be given [these permissions](/azure/backup/disk-backup-faq##what-are-the-permissions-used-by-azure-backup-during-backup-and-restore-operation-) | +| | Disk Restore Operator | Resource group where disks will be restored to | In addition, the backup vault MSI should be given [these permissions](./disk-backup-faq.yml) | ### Minimum role requirements for Azure blob backup @@ -125,12 +125,12 @@ The following table captures the Backup management actions and corresponding Azu | Validate before configuring backup | Backup Operator | Backup vault | | | | Storage account backup contributor | Storage account containing the blob | | | Enable backup from backup vault | Backup Operator | Backup vault | | -| | Storage account backup contributor | Storage account containing the blob | In addition, the backup vault MSI should be given [these permissions](/azure/backup/blob-backup-configure-manage#grant-permissions-to-the-backup-vault-on-storage-accounts) | +| | Storage account backup contributor | Storage account containing the blob | In addition, the backup vault MSI should be given [these permissions](./blob-backup-configure-manage.md#grant-permissions-to-the-backup-vault-on-storage-accounts) | | On demand backup of blob | Backup Operator | Backup vault | | | Validate before restoring a blob | Backup Operator | Backup vault | | | | Storage account backup contributor | Storage account containing the blob | | | Restoring a blob | Backup Operator | Backup vault | | -| | Storage account backup contributor | Storage account containing the blob | In addition, the backup vault MSI should be given [these permissions](/azure/backup/blob-backup-configure-manage#grant-permissions-to-the-backup-vault-on-storage-accounts) | +| | Storage account backup contributor | Storage account containing the blob | In addition, the backup vault MSI should be given [these permissions](./blob-backup-configure-manage.md#grant-permissions-to-the-backup-vault-on-storage-accounts) | ### Minimum role requirements for Azure database for PostGreSQL server backup @@ -139,12 +139,12 @@ The following table captures the Backup management actions and corresponding Azu | Validate before configuring backup | Backup Operator | Backup vault | | | | Reader | Azure PostGreSQL 
server | | | Enable backup from backup vault | Backup Operator | Backup vault | | -| | Contributor | Azure PostGreSQL server | Alternatively, instead of a built-in-role, you can consider a custom role which has the following permissions: Microsoft.DBforPostgreSQL/servers/write Microsoft.DBforPostgreSQL/servers/read In addition, the backup vault MSI should be given [these permissions](/azure/backup/backup-azure-database-postgresql-overview#set-of-permissions-needed-for-azure-postgresql-database-backup) | +| | Contributor | Azure PostGreSQL server | Alternatively, instead of a built-in-role, you can consider a custom role which has the following permissions: Microsoft.DBforPostgreSQL/servers/write Microsoft.DBforPostgreSQL/servers/read In addition, the backup vault MSI should be given [these permissions](./backup-azure-database-postgresql-overview.md#set-of-permissions-needed-for-azure-postgresql-database-backup) | | On demand backup of PostGreSQL server | Backup Operator | Backup vault | | | Validate before restoring a server | Backup Operator | Backup vault | | | | Contributor | Target Azure PostGreSQL server | Alternatively, instead of a built-in-role, you can consider a custom role which has the following permissions: Microsoft.DBforPostgreSQL/servers/write Microsoft.DBforPostgreSQL/servers/read | Restoring a server | Backup Operator | Backup vault | | -| | Contributor | Target Azure PostGreSQL server | Alternatively, instead of a built-in-role, you can consider a custom role which has the following permissions: Microsoft.DBforPostgreSQL/servers/write Microsoft.DBforPostgreSQL/servers/read In addition, the backup vault MSI should be given [these permissions](/azure/backup/backup-azure-database-postgresql-overview#set-of-permissions-needed-for-azure-postgresql-database-restore) | +| | Contributor | Target Azure PostGreSQL server | Alternatively, instead of a built-in-role, you can consider a custom role which has the following permissions: Microsoft.DBforPostgreSQL/servers/write Microsoft.DBforPostgreSQL/servers/read In addition, the backup vault MSI should be given [these permissions](./backup-azure-database-postgresql-overview.md#set-of-permissions-needed-for-azure-postgresql-database-restore) | ## Next steps @@ -153,4 +153,4 @@ The following table captures the Backup management actions and corresponding Azu * [PowerShell](../role-based-access-control/role-assignments-powershell.md) * [Azure CLI](../role-based-access-control/role-assignments-cli.md) * [REST API](../role-based-access-control/role-assignments-rest.md) -* [Azure role-based access control troubleshooting](../role-based-access-control/troubleshooting.md): Get suggestions for fixing common issues. +* [Azure role-based access control troubleshooting](../role-based-access-control/troubleshooting.md): Get suggestions for fixing common issues. 
\ No newline at end of file diff --git a/articles/backup/backup-support-matrix-mars-agent.md b/articles/backup/backup-support-matrix-mars-agent.md index a0e03999b9c9..16a3b6556e4e 100644 --- a/articles/backup/backup-support-matrix-mars-agent.md +++ b/articles/backup/backup-support-matrix-mars-agent.md @@ -61,6 +61,7 @@ The operating systems must be 64 bit and should be running the latest services p **Operating system** | **Files/folders** | **System state** | **Software/Module requirements** --- | --- | --- | --- +Windows 11 (Enterprise, Pro, Home) | Yes | No | Check the corresponding server version for software/module requirements Windows 10 (Enterprise, Pro, Home) | Yes | No | Check the corresponding server version for software/module requirements Windows Server 2022 (Standard, Datacenter, Essentials) | Yes | Yes | Check the corresponding server version for software/module requirements Windows 8.1 (Enterprise, Pro)| Yes |No | Check the corresponding server version for software/module requirements diff --git a/articles/backup/whats-new.md b/articles/backup/whats-new.md index d4db278c79ae..88b72f97835a 100644 --- a/articles/backup/whats-new.md +++ b/articles/backup/whats-new.md @@ -2,7 +2,7 @@ title: What's new in Azure Backup description: Learn about new features in Azure Backup. ms.topic: conceptual -ms.date: 05/24/2022 +ms.date: 05/26/2022 author: v-amallick ms.service: backup ms.author: v-amallick @@ -17,7 +17,6 @@ You can learn more about the new releases by bookmarking this page or by [subscr ## Updates summary - May 2022 - - [Multi-user authorization using Resource Guard is now generally available](#multi-user-authorization-using-resource-guard-is-now-generally-available) - [Archive tier support for Azure Virtual Machines is now generally available](#archive-tier-support-for-azure-virtual-machines-is-now-generally-available) - February 2022 - [Multiple backups per day for Azure Files is now generally available](#multiple-backups-per-day-for-azure-files-is-now-generally-available) @@ -41,12 +40,6 @@ You can learn more about the new releases by bookmarking this page or by [subscr - February 2021 - [Backup for Azure Blobs (in preview)](#backup-for-azure-blobs-in-preview) -## Multi-user authorization using Resource Guard is now generally available - -Azure Backup now supports multi-user authorization (MUA) that allows you to add an additional layer of protection to critical operations on your Recovery Services vaults. For MUA, Azure Backup uses the Azure resource, Resource Guard, to ensure critical operations are performed only with applicable authorization. - -For more information, see [how to protect Recovery Services vault and manage critical operations with MUA](multi-user-authorization.md). - ## Archive tier support for Azure Virtual Machines is now generally available Azure Backup now supports the movement of recovery points to the Vault-archive tier for Azure Virtual Machines from the Azure portal. This allows you to move the archivable/recommended recovery points (corresponding to a backup item) to the Vault-archive tier at one go. 
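For readers who prefer scripting over the portal, there may also be a CLI path for moving recovery points to the archive tier. The sketch below is only an assumption about the `az backup recoverypoint move` command and its `--source-tier` and `--destination-tier` flags; verify the command and parameter names in the az backup reference before running it, and treat every name below as a placeholder.

```console
# Assumed CLI equivalent of the portal move operation (verify the command and flags before use).
az backup recoverypoint move \
    --resource-group <resource-group> \
    --vault-name <vault-name> \
    --container-name <container-name> \
    --item-name <item-name> \
    --backup-management-type AzureIaasVM \
    --name <recovery-point-name> \
    --source-tier VaultStandard \
    --destination-tier VaultArchive
```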
diff --git a/articles/batch/create-pool-ephemeral-os-disk.md b/articles/batch/create-pool-ephemeral-os-disk.md index 531c3d01a2d1..dbbfa7f7871f 100644 --- a/articles/batch/create-pool-ephemeral-os-disk.md +++ b/articles/batch/create-pool-ephemeral-os-disk.md @@ -26,7 +26,7 @@ For Batch workloads, the main benefits of using ephemeral OS disks are reduced c To determine whether a VM series supports ephemeral OS disks, check the documentation for each VM instance. For example, the [Ddv4 and Ddsv4-series](../virtual-machines/ddv4-ddsv4-series.md) supports ephemeral OS disks. -Alternately, you can programmatically query to check the 'EphemeralOSDiskSupported' capability. An example PowerShell cmdlet to query this capability is provided in the [ephemeral OS disk frequently asked questions](../virtual-machines/ephemeral-os-disks.md#frequently-asked-questions). +Alternately, you can programmatically query to check the 'EphemeralOSDiskSupported' capability. An example PowerShell cmdlet to query this capability is provided in the [ephemeral OS disk frequently asked questions](../virtual-machines/ephemeral-os-disks-faq.md). ## Create a pool that uses ephemeral OS disks diff --git a/articles/cloud-services-extended-support/deploy-prerequisite.md b/articles/cloud-services-extended-support/deploy-prerequisite.md index 23565f69125d..a86117a5c970 100644 --- a/articles/cloud-services-extended-support/deploy-prerequisite.md +++ b/articles/cloud-services-extended-support/deploy-prerequisite.md @@ -20,6 +20,9 @@ To ensure a successful Cloud Services (extended support) deployment review the b Cloud Service (extended support) deployments must be in a virtual network. Virtual network can be created through [Azure portal](../virtual-network/quick-create-portal.md), [PowerShell](../virtual-network/quick-create-powershell.md), [Azure CLI](../virtual-network/quick-create-cli.md) or [ARM Template](../virtual-network/quick-create-template.md). The virtual network and subnets must also be referenced in the Service Configuration (.cscfg) under the [NetworkConfiguration](schema-cscfg-networkconfiguration.md) section. For a virtual networks belonging to the same resource group as the cloud service, referencing only the virtual network name in the Service Configuration (.cscfg) file is sufficient. If the virtual network and cloud service are in two different resource groups, then the complete Azure Resource Manager ID of the virtual network needs to be specified in the Service Configuration (.cscfg) file. + +> [!NOTE] +> A virtual network and a cloud service located in different resource groups aren't supported in Visual Studio 2019. Consider using an ARM template or the Azure portal for successful deployments in such scenarios. #### Virtual Network located in same resource group ```xml diff --git a/articles/cognitive-services/Custom-Vision-Service/faq.yml b/articles/cognitive-services/Custom-Vision-Service/faq.yml index 2e5dc7d0c1f8..ef3cd71c01d4 100644 --- a/articles/cognitive-services/Custom-Vision-Service/faq.yml +++ b/articles/cognitive-services/Custom-Vision-Service/faq.yml @@ -49,7 +49,7 @@ sections: - question: | Once I've trained a Custom Vision model, can I manage/deploy the same model to different regions? answer: | - We don't have a use case for publishing a model to a different region, but we do offer the ability to export/import a project into different regions. See [Copy and back up Custom Vision projects](https://docs.microsoft.com/azure/cognitive-services/custom-vision-service/copy-move-projects).
+ We don't have a use case for publishing a model to a different region, but we do offer the ability to export/import a project into different regions. See [Copy and back up Custom Vision projects](./copy-move-projects.md). - question: | What is the difference between Custom Vision and AutoML? answer: | @@ -64,12 +64,12 @@ sections: - question: | What is the difference between the free and standard pricing tiers? answer: | - See the [Limits and quotas](https://docs.microsoft.com/azure/cognitive-services/custom-vision-service/limits-and-quotas) page. + See the [Limits and quotas](./limits-and-quotas.md) page. - question: | How can users be added to a Cognitive Services multi-service account to collaborate on a Custom Vision project in the web portal? answer: | - You can use Azure RBAC roles to give specific users access to collaborate on a custom vision portal project. See the [Role-based access control docs](https://docs.microsoft.com/azure/cognitive-services/custom-vision-service/role-based-access-control) + You can use Azure RBAC roles to give specific users access to collaborate on a custom vision portal project. See the [Role-based access control docs](./role-based-access-control.md) - question: | Can training images be exported with the tags that were added in the Custom Vision portal? answer: | @@ -88,5 +88,4 @@ sections: - question: | How can I write logs on this service? answer: | - Use [Diagnostic logging](https://docs.microsoft.com/azure/cognitive-services/diagnostic-logging). - + Use [Diagnostic logging](../diagnostic-logging.md). \ No newline at end of file diff --git a/articles/cognitive-services/Encryption/cognitive-services-encryption-keys-portal.md b/articles/cognitive-services/Encryption/cognitive-services-encryption-keys-portal.md index 7070b6322f94..867e745bd44c 100644 --- a/articles/cognitive-services/Encryption/cognitive-services-encryption-keys-portal.md +++ b/articles/cognitive-services/Encryption/cognitive-services-encryption-keys-portal.md @@ -26,6 +26,7 @@ The process to enable Customer-Managed Keys with Azure Key Vault for Cognitive S * [Language Understanding service encryption of data at rest](../LUIS/encrypt-data-at-rest.md) * [QnA Maker encryption of data at rest](../QnAMaker/encrypt-data-at-rest.md) * [Translator encryption of data at rest](../translator/encrypt-data-at-rest.md) +* [Language service encryption of data at rest](../language-service/concepts/encryption-data-at-rest.md) ## Speech @@ -39,4 +40,4 @@ The process to enable Customer-Managed Keys with Azure Key Vault for Cognitive S ## Next steps * [What is Azure Key Vault](../../key-vault/general/overview.md)? -* [Cognitive Services Customer-Managed Key Request Form](https://aka.ms/cogsvc-cmk) \ No newline at end of file +* [Cognitive Services Customer-Managed Key Request Form](https://aka.ms/cogsvc-cmk) diff --git a/articles/cognitive-services/QnAMaker/Overview/language-support.md b/articles/cognitive-services/QnAMaker/Overview/language-support.md index 40cf417503c2..ea44e071f6f7 100644 --- a/articles/cognitive-services/QnAMaker/Overview/language-support.md +++ b/articles/cognitive-services/QnAMaker/Overview/language-support.md @@ -32,7 +32,7 @@ Consider the following: ## Supporting multiple languages in one QnA Maker resource -This functionality is not supported in our current Generally Available (GA) stable release. Check out [question answering](https://docs.microsoft.com/azure/cognitive-services/language-service/question-answering/overview) to test out this functionality. 
+This functionality is not supported in our current Generally Available (GA) stable release. Check out [question answering](../../language-service/question-answering/overview.md) to test out this functionality. ## Supporting multiple languages in one knowledge base @@ -129,4 +129,4 @@ This additional ranking is an internal working of the QnA Maker's ranker. ## Next steps > [!div class="nextstepaction"] -> [Language selection](../index.yml) +> [Language selection](../index.yml) \ No newline at end of file diff --git a/articles/cognitive-services/QnAMaker/limits.md b/articles/cognitive-services/QnAMaker/limits.md index 70056781437a..a12b2898158b 100644 --- a/articles/cognitive-services/QnAMaker/limits.md +++ b/articles/cognitive-services/QnAMaker/limits.md @@ -124,8 +124,8 @@ These represent the limits when Prebuilt API is used to *Generate response* or c > Support for unstructured file/content and is available only in question answering. ## Alterations limits -[Alterations](https://docs.microsoft.com/rest/api/cognitiveservices/qnamaker/alterations/replace) do not allow these special characters: ',', '?', ':', ';', '\"', '\'', '(', ')', '{', '}', '[', ']', '-', '+', '.', '/', '!', '*', '-', '_', '@', '#' +[Alterations](/rest/api/cognitiveservices/qnamaker/alterations/replace) do not allow these special characters: ',', '?', ':', ';', '\"', '\'', '(', ')', '{', '}', '[', ']', '-', '+', '.', '/', '!', '*', '-', '_', '@', '#' ## Next steps -Learn when and how to change [service pricing tiers](How-To/set-up-qnamaker-service-azure.md#upgrade-qna-maker-sku). +Learn when and how to change [service pricing tiers](How-To/set-up-qnamaker-service-azure.md#upgrade-qna-maker-sku). \ No newline at end of file diff --git a/articles/cognitive-services/Speech-Service/captioning-concepts.md b/articles/cognitive-services/Speech-Service/captioning-concepts.md index d1df98893dc6..da0d361e808a 100644 --- a/articles/cognitive-services/Speech-Service/captioning-concepts.md +++ b/articles/cognitive-services/Speech-Service/captioning-concepts.md @@ -30,7 +30,7 @@ The following are aspects to consider when using captioning: * Consider output formats such as SRT (SubRip Text) and WebVTT (Web Video Text Tracks). These can be loaded onto most video players such as VLC, automatically adding the captions on to your video. > [!TIP] -> Try the [Azure Video Indexer](/azure/azure-video-indexer/video-indexer-overview) as a demonstration of how you can get captions for videos that you upload. +> Try the [Azure Video Indexer](../../azure-video-indexer/video-indexer-overview.md) as a demonstration of how you can get captions for videos that you upload. Captioning can accompany real time or pre-recorded speech. Whether you're showing captions in real time or with a recording, you can use the [Speech SDK](speech-sdk.md) or [Speech CLI](spx-overview.md) to recognize speech and get transcriptions. You can also use the [Batch transcription API](batch-transcription.md) for pre-recorded video. 
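As a small illustration of the Speech CLI path mentioned above, the following sketch configures the CLI and runs recognition on a local WAV file; the recognized text can then be timed and formatted into SRT or WebVTT captions. The key, region, and file name are placeholders, and only basic recognition is shown; see the Speech CLI documentation for output-formatting options.

```console
# One-time Speech CLI configuration (replace with your own key and region).
spx config @key --set YourSubscriptionKey
spx config @region --set YourServiceRegion

# Recognize speech from a pre-recorded WAV file.
spx recognize --file caption.this.wav
```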
@@ -240,4 +240,4 @@ There are some situations where [training a custom model](custom-speech-overview ## Next steps * [Captioning quickstart](captioning-quickstart.md) -* [Get speech recognition results](get-speech-recognition-results.md) +* [Get speech recognition results](get-speech-recognition-results.md) \ No newline at end of file diff --git a/articles/cognitive-services/Speech-Service/includes/quickstarts/captioning/cpp.md b/articles/cognitive-services/Speech-Service/includes/quickstarts/captioning/cpp.md index 42a1dda2aa2f..022bbadbecb2 100644 --- a/articles/cognitive-services/Speech-Service/includes/quickstarts/captioning/cpp.md +++ b/articles/cognitive-services/Speech-Service/includes/quickstarts/captioning/cpp.md @@ -30,18 +30,21 @@ You must also install [GStreamer](~/articles/cognitive-services/speech-service/h Follow these steps to create a new console application and install the Speech SDK. -1. Download or copy the [scenarios/cpp/windows/captioning/](https://github.com/Azure-Samples/cognitive-services-speech-sdk/tree/master/scenarios/go/captioning/) sample files from GitHub into a local directory. -1. Open `captioning.sln` in Visual Studio. +1. Download or copy the scenarios/cpp/windows/captioning/ sample files from GitHub into a local directory. +1. Open the `captioning.sln` solution file in Visual Studio. 1. Install the Speech SDK in your project with the NuGet package manager. ```powershell Install-Package Microsoft.CognitiveServices.Speech ``` -1. Make sure the compiler is set to **ISO C++17 Standard (/std:c++17)** at **Project** > **Properties** > **General** > **C++ Language Standard**. -1. Enter your preferred command line arguments at **Project** > **Properties** > **Debugging** > **Command Arguments**. See [usage and arguments](#usage-and-arguments) for the available options. Here is an example. Replace `YourSubscriptionKey` with your Speech resource key, and replace `YourServiceRegion` with your Speech resource region: +1. Open **Project** > **Properties** > **General**. Set **Configuration** to `All configurations`. Set **C++ Language Standard** to `ISO C++17 Standard (/std:c++17)`. +1. Open **Build** > **Configuration Manager**. + - On a 64-bit Windows installation, set **Active solution platform** to `x64`. + - On a 32-bit Windows installation, set **Active solution platform** to `x86`. +1. Open **Project** > **Properties** > **Debugging**. Enter your preferred command line arguments at **Command Arguments**. See [usage and arguments](#usage-and-arguments) for the available options. Here is an example: ``` --key YourSubscriptionKey --region YourServiceRegion --input c:\caption\caption.this.mp4 --format any --output c:\caption\caption.output.txt - --srt --recognizing --threshold 5 --profanity mask --phrases "Contoso;Jessie;Rehaan" ``` - Replace `YourSubscriptionKey` with your Speech resource key, and replace `YourServiceRegion` with your Speech resource region. Make sure that the specified arguments for `--input` file and `--output` path exist. Otherwise you must change the path. + Replace `YourSubscriptionKey` with your Speech resource key, and replace `YourServiceRegion` with your Speech resource [region](~/articles/cognitive-services/speech-service/regions.md), such as `westus` or `northeurope`. Make sure that the paths specified by `--input` and `--output` are valid. Otherwise you must change the paths. 1. Build and run the console application. The output file with complete captions is written to `c:\caption\caption.output.txt`. 
Intermediate results are shown in the console: ```console 00:00:00,180 --> 00:00:01,600 diff --git a/articles/cognitive-services/Speech-Service/includes/quickstarts/captioning/csharp.md b/articles/cognitive-services/Speech-Service/includes/quickstarts/captioning/csharp.md index 0ed0f3aee40d..071a6773fa85 100644 --- a/articles/cognitive-services/Speech-Service/includes/quickstarts/captioning/csharp.md +++ b/articles/cognitive-services/Speech-Service/includes/quickstarts/captioning/csharp.md @@ -37,7 +37,7 @@ Follow these steps to create a new console application and install the Speech SD ```dotnetcli dotnet add package Microsoft.CognitiveServices.Speech ``` -1. Copy the [scenarios/csharp/dotnetcore/captioning/](https://github.com/Azure-Samples/cognitive-services-speech-sdk/tree/master/scenarios/csharp/dotnetcore/captioning/) sample files from GitHub into your project directory. Overwrite the local copy of `Program.cs` with the file that you copy from GitHub. +1. Copy the scenarios/csharp/dotnetcore/captioning/ sample files from GitHub into your project directory. Overwrite the local copy of `Program.cs` with the file that you copy from GitHub. 1. Build the project with the .NET CLI. ```dotnetcli dotnet build @@ -46,7 +46,7 @@ Follow these steps to create a new console application and install the Speech SD ```dotnetcli dotnet run --key YourSubscriptionKey --region YourServiceRegion --input c:\caption\caption.this.mp4 --format any --output c:\caption\caption.output.txt - --srt --recognizing --threshold 5 --profanity mask --phrases "Contoso;Jessie;Rehaan" ``` - Replace `YourSubscriptionKey` with your Speech resource key, and replace `YourServiceRegion` with your Speech resource region. Make sure that the specified arguments for `--input` file and `--output` path exist. Otherwise you must change the path. + Replace `YourSubscriptionKey` with your Speech resource key, and replace `YourServiceRegion` with your Speech resource [region](~/articles/cognitive-services/speech-service/regions.md), such as `westus` or `northeurope`. Make sure that the paths specified by `--input` and `--output` are valid. Otherwise you must change the paths. The output file with complete captions is written to `c:\caption\caption.output.txt`. Intermediate results are shown in the console: ```console diff --git a/articles/cognitive-services/Speech-Service/includes/quickstarts/captioning/go.md b/articles/cognitive-services/Speech-Service/includes/quickstarts/captioning/go.md index 04bb8987ea0d..0e8853eb9e90 100644 --- a/articles/cognitive-services/Speech-Service/includes/quickstarts/captioning/go.md +++ b/articles/cognitive-services/Speech-Service/includes/quickstarts/captioning/go.md @@ -30,7 +30,7 @@ You must also install [GStreamer](~/articles/cognitive-services/speech-service/h Follow these steps to create a new GO module and install the Speech SDK. -1. Download or copy the [scenarios/go/captioning/](https://github.com/Azure-Samples/cognitive-services-speech-sdk/tree/master/scenarios/go/captioning/) sample files from GitHub into a local directory. +1. Download or copy the scenarios/go/captioning/ sample files from GitHub into a local directory. 1. Open a command prompt in the same directory as `captioning.go`. 1. Run the following commands to create a `go.mod` file that links to the Speech SDK components hosted on GitHub: ```console @@ -45,7 +45,7 @@ Follow these steps to create a new GO module and install the Speech SDK. 
```console go run captioning --key YourSubscriptionKey --region YourServiceRegion --input caption.this.mp4 --format any --output caption.output.txt - --srt --recognizing --threshold 5 --profanity mask --phrases "Contoso;Jessie;Rehaan" ``` - Replace `YourSubscriptionKey` with your Speech resource key, and replace `YourServiceRegion` with your Speech resource region. Make sure that the specified arguments for `--input` file and `--output` path exist. Otherwise you must change the path. + Replace `YourSubscriptionKey` with your Speech resource key, and replace `YourServiceRegion` with your Speech resource [region](~/articles/cognitive-services/speech-service/regions.md), such as `westus` or `northeurope`. Make sure that the paths specified by `--input` and `--output` are valid. Otherwise you must change the paths. The output file with complete captions is written to `caption.output.txt`. Intermediate results are shown in the console: ```console diff --git a/articles/cognitive-services/Speech-Service/includes/quickstarts/captioning/java.md b/articles/cognitive-services/Speech-Service/includes/quickstarts/captioning/java.md index c9303b7fafde..e4d86eba8f5e 100644 --- a/articles/cognitive-services/Speech-Service/includes/quickstarts/captioning/java.md +++ b/articles/cognitive-services/Speech-Service/includes/quickstarts/captioning/java.md @@ -71,7 +71,7 @@ Before you can do anything, you need to install the Speech SDK. The sample in th Follow these steps to create a new console application and install the Speech SDK. -1. Copy the [scenarios/java/jre/console/captioning/](https://github.com/Azure-Samples/cognitive-services-speech-sdk/tree/master/scenarios/java/jre/console/captioning/) sample files from GitHub into your project directory. +1. Copy the scenarios/java/jre/captioning/ sample files from GitHub into your project directory. 1. Open a command prompt and run this command to compile the project files. ```console javac Captioning.java -cp ".;target\dependency\*" @@ -80,7 +80,7 @@ Follow these steps to create a new console application and install the Speech SD ```console java -cp ".;target\dependency\*" Captioning --key YourSubscriptionKey --region YourServiceRegion --input c:\caption\caption.this.mp4 --format any --output c:\caption\caption.output.txt - --srt --recognizing --threshold 5 --profanity mask --phrases "Contoso;Jessie;Rehaan" ``` - Replace `YourSubscriptionKey` with your Speech resource key, and replace `YourServiceRegion` with your Speech resource region. Make sure that the specified arguments for `--input` file and `--output` path exist. Otherwise you must change the path. + Replace `YourSubscriptionKey` with your Speech resource key, and replace `YourServiceRegion` with your Speech resource [region](~/articles/cognitive-services/speech-service/regions.md), such as `westus` or `northeurope`. Make sure that the paths specified by `--input` and `--output` are valid. Otherwise you must change the paths. The output file with complete captions is written to `c:\caption\caption.output.txt`. 
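If you compile and run the Java sample on Linux or macOS instead of Windows, the classpath separator is `:` rather than `;`, and paths use forward slashes. A minimal sketch, assuming the same `target/dependency` layout produced by the earlier Maven step and an input file in the current directory:

```console
# Classpath separator on Linux/macOS is ':' (assumes the Maven dependency-copy layout used earlier).
javac Captioning.java -cp ".:target/dependency/*"
java -cp ".:target/dependency/*" Captioning --key YourSubscriptionKey --region YourServiceRegion --input caption.this.mp4 --format any --output caption.output.txt --srt
```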
Intermediate results are shown in the console: ```console diff --git a/articles/cognitive-services/Speech-Service/includes/quickstarts/captioning/javascript.md b/articles/cognitive-services/Speech-Service/includes/quickstarts/captioning/javascript.md index b4b0721172d1..f719fe6e7477 100644 --- a/articles/cognitive-services/Speech-Service/includes/quickstarts/captioning/javascript.md +++ b/articles/cognitive-services/Speech-Service/includes/quickstarts/captioning/javascript.md @@ -28,7 +28,7 @@ Before you can do anything, you need to install the Speech SDK for JavaScript. I Follow these steps to create a new console application and install the Speech SDK. -1. Copy the [scenarios/javascript/node/captioning/](https://github.com/Azure-Samples/cognitive-services-speech-sdk/tree/master/scenarios/javascript/node/captioning/) sample files from GitHub into your project directory. +1. Copy the scenarios/javascript/node/captioning/ sample files from GitHub into your project directory. 1. Open a command prompt in the same directory as `Captioning.js`. 1. Install the Speech SDK for JavaScript: ```console @@ -38,7 +38,7 @@ Follow these steps to create a new console application and install the Speech SD ```console node captioning.js --key YourSubscriptionKey --region YourServiceRegion --input c:\caption\caption.this.wav --output c:\caption\caption.output.txt - --srt --recognizing --threshold 5 --profanity mask --phrases "Contoso;Jessie;Rehaan" ``` - Replace `YourSubscriptionKey` with your Speech resource key, and replace `YourServiceRegion` with your Speech resource region. Make sure that the specified arguments for `--input` file and `--output` path exist. Otherwise you must change the path. + Replace `YourSubscriptionKey` with your Speech resource key, and replace `YourServiceRegion` with your Speech resource [region](~/articles/cognitive-services/speech-service/regions.md), such as `westus` or `northeurope`. Make sure that the paths specified by `--input` and `--output` are valid. Otherwise you must change the paths. > [!NOTE] > The Speech SDK for JavaScript does not support [compressed input audio](~/articles/cognitive-services/speech-service/how-to-use-codec-compressed-audio-input-streams.md). You must use a WAV file as shown in the example. diff --git a/articles/cognitive-services/Speech-Service/includes/quickstarts/captioning/python.md b/articles/cognitive-services/Speech-Service/includes/quickstarts/captioning/python.md index ef9260ab70b4..ba74e9d57fad 100644 --- a/articles/cognitive-services/Speech-Service/includes/quickstarts/captioning/python.md +++ b/articles/cognitive-services/Speech-Service/includes/quickstarts/captioning/python.md @@ -33,7 +33,7 @@ The Speech SDK for Python is available as a [Python Package Index (PyPI) module] Follow these steps to create a new console application. -1. Download or copy the [scenarios/python/console/captioning/](https://github.com/Azure-Samples/cognitive-services-speech-sdk/tree/master/scenarios/python/console/captioning/) sample files from GitHub into a local directory. +1. Download or copy the scenarios/python/console/captioning/ sample files from GitHub into a local directory. 1. Open a command prompt in the same directory as `captioning.py`. 1. Run this command to install the Speech SDK: ```console @@ -43,7 +43,7 @@ Follow these steps to create a new console application. 
```console python captioning.py --key YourSubscriptionKey --region YourServiceRegion --input caption.this.mp4 --format any --output caption.output.txt - --srt --recognizing --threshold 5 --profanity mask --phrases "Contoso;Jessie;Rehaan" ``` - Replace `YourSubscriptionKey` with your Speech resource key, and replace `YourServiceRegion` with your Speech resource region. Make sure that the specified arguments for `--input` file and `--output` path exist. Otherwise you must change the path. + Replace `YourSubscriptionKey` with your Speech resource key, and replace `YourServiceRegion` with your Speech resource [region](~/articles/cognitive-services/speech-service/regions.md), such as `westus` or `northeurope`. Make sure that the paths specified by `--input` and `--output` are valid. Otherwise you must change the paths. The output file with complete captions is written to `caption.output.txt`. Intermediate results are shown in the console: ```console diff --git a/articles/cognitive-services/Speech-Service/includes/quickstarts/captioning/usage-arguments.md b/articles/cognitive-services/Speech-Service/includes/quickstarts/captioning/usage-arguments.md index 23eed7987660..21809c51a91b 100644 --- a/articles/cognitive-services/Speech-Service/includes/quickstarts/captioning/usage-arguments.md +++ b/articles/cognitive-services/Speech-Service/includes/quickstarts/captioning/usage-arguments.md @@ -9,12 +9,12 @@ ms.author: eur Connection options include: - `--key`: Your Speech resource key. -- `--region REGION`: Your Speech resource region. Examples: `westus`, `eastus` +- `--region REGION`: Your Speech resource region. Examples: `westus`, `northeurope` Input options include: - `--input FILE`: Input audio from file. The default input is the microphone. -- `--format FORMAT`: Use compressed audio format. Valid only with `--file`. Valid values are `alaw`, `any`, `flac`, `mp3`, `mulaw`, and `ogg_opus`. The default value is `any`. This option is not available with the JavaScript captioning sample. For compressed audio files such as MP4, install GStreamer and see [How to use compressed input audio](~/articles/cognitive-services/speech-service/how-to-use-codec-compressed-audio-input-streams.md). +- `--format FORMAT`: Use compressed audio format. Valid only with `--file`. Valid values are `alaw`, `any`, `flac`, `mp3`, `mulaw`, and `ogg_opus`. The default value is `any`. To use a `wav` file, don't specify the format. This option is not available with the JavaScript captioning sample. For compressed audio files such as MP4, install GStreamer and see [How to use compressed input audio](~/articles/cognitive-services/speech-service/how-to-use-codec-compressed-audio-input-streams.md). Language options include: diff --git a/articles/cognitive-services/Translator/language-support.md b/articles/cognitive-services/Translator/language-support.md index 4c440b93d52d..9d82944ab18d 100644 --- a/articles/cognitive-services/Translator/language-support.md +++ b/articles/cognitive-services/Translator/language-support.md @@ -28,24 +28,22 @@ ms.author: lajanuar > [!NOTE] > Language code `pt` will default to `pt-br`, Portuguese (Brazil). -> -> ☼ Indicates the language is not available for scanned PDF document translation. 
-|Language | Language code | ☼ Cloud – Text Translation and Document Translation | Containers – Text Translation|Custom Translator|Auto Language Detection|Dictionary +|Language | Language code | Cloud – Text Translation and Document Translation | Containers – Text Translation|Custom Translator|Auto Language Detection|Dictionary |:-|:-:|:-:|:-:|:-:|:-:|:-:| | Afrikaans | `af` |✔|✔|✔|✔|✔| | Albanian | `sq` |✔|✔||✔|| -| Amharic ☼ | `am` |✔|✔|||| +| Amharic | `am` |✔|✔|||| | Arabic | `ar` |✔|✔|✔|✔|✔| -| Armenian ☼ | `hy` |✔|✔||✔|| -| Assamese ☼ | `as` |✔|✔|✔||| +| Armenian | `hy` |✔|✔||✔|| +| Assamese | `as` |✔|✔|✔||| | Azerbaijani (Latin) | `az` |✔|✔|||| -| Bangla ☼ | `bn` |✔|✔|✔||✔| -| Bashkir ☼ | `ba` |✔||||| +| Bangla | `bn` |✔|✔|✔||✔| +| Bashkir | `ba` |✔||||| | Basque | `eu` |✔||||| | Bosnian (Latin) | `bs` |✔|✔|✔||✔| | Bulgarian | `bg` |✔|✔|✔|✔|✔| -| Cantonese (Traditional) ☼ | `yue` |✔|✔|||| +| Cantonese (Traditional) | `yue` |✔|✔|||| | Catalan | `ca` |✔|✔|✔|✔|✔| | Chinese (Literary) | `lzh` |✔||||| | Chinese Simplified | `zh-Hans` |✔|✔|✔|✔|✔| @@ -54,7 +52,7 @@ ms.author: lajanuar | Czech | `cs` |✔|✔|✔|✔|✔| | Danish | `da` |✔|✔|✔|✔|✔| | Dari | `prs` |✔|✔|||| -| Divehi ☼ | `dv` |✔|||✔|| +| Divehi | `dv` |✔|||✔|| | Dutch | `nl` |✔|✔|✔|✔|✔| | English | `en` |✔|✔|✔|✔|✔| | Estonian | `et` |✔|✔|✔|✔|| @@ -65,55 +63,55 @@ ms.author: lajanuar | French | `fr` |✔|✔|✔|✔|✔| | French (Canada) | `fr-ca` |✔|✔|||| | Galician | `gl` |✔||||| -| Georgian ☼ | `ka` |✔|||✔|| +| Georgian | `ka` |✔|||✔|| | German | `de` |✔|✔|✔|✔|✔| -| Greek ☼ | `el` |✔|✔|✔|✔|✔| -| Gujarati ☼ | `gu` |✔|✔|✔|✔|| +| Greek | `el` |✔|✔|✔|✔|✔| +| Gujarati | `gu` |✔|✔|✔|✔|| | Haitian Creole | `ht` |✔|✔||✔|✔| -| Hebrew ☼ | `he` |✔|✔|✔|✔|✔| +| Hebrew | `he` |✔|✔|✔|✔|✔| | Hindi | `hi` |✔|✔|✔|✔|✔| | Hmong Daw (Latin) | `mww` |✔|✔|||✔| | Hungarian | `hu` |✔|✔|✔|✔|✔| | Icelandic | `is` |✔|✔|✔|✔|✔| | Indonesian | `id` |✔|✔|✔|✔|✔| -| Inuinnaqtun ☼ | `ikt` |✔||||| -| Inuktitut ☼ | `iu` |✔|✔|✔|✔|| +| Inuinnaqtun | `ikt` |✔||||| +| Inuktitut | `iu` |✔|✔|✔|✔|| | Inuktitut (Latin) | `iu-Latn` |✔||||| | Irish | `ga` |✔|✔|✔|✔|| | Italian | `it` |✔|✔|✔|✔|✔| | Japanese | `ja` |✔|✔|✔|✔|✔| -| Kannada ☼ | `kn` |✔|✔|✔||| +| Kannada | `kn` |✔|✔|✔||| | Kazakh | `kk` |✔|✔|||| -| Khmer ☼ | `km` |✔|✔||✔|| +| Khmer | `km` |✔|✔||✔|| | Klingon | `tlh-Latn` |✔| ||✔|✔| -| Klingon (plqaD) ☼ | `tlh-Piqd` |✔| ||✔|| +| Klingon (plqaD) | `tlh-Piqd` |✔| ||✔|| | Korean | `ko` |✔|✔|✔|✔|✔| | Kurdish (Central) | `ku` |✔|✔||✔|| -| Kurdish (Northern) ☼ | `kmr` |✔|✔|||| +| Kurdish (Northern) | `kmr` |✔|✔|||| | Kyrgyz (Cyrillic) | `ky` |✔||||| -| Lao ☼ | `lo` |✔|✔||✔|| -| Latvian ☼| `lv` |✔|✔|✔|✔|✔| +| Lao | `lo` |✔|✔||✔|| +| Latvian | `lv` |✔|✔|✔|✔|✔| | Lithuanian | `lt` |✔|✔|✔|✔|✔| -| Macedonian ☼ | `mk` |✔|||✔|| -| Malagasy ☼ | `mg` |✔|✔|✔||| +| Macedonian | `mk` |✔|||✔|| +| Malagasy | `mg` |✔|✔|✔||| | Malay (Latin) | `ms` |✔|✔|✔|✔|✔| -| Malayalam ☼ | `ml` |✔|✔|✔||| +| Malayalam | `ml` |✔|✔|✔||| | Maltese | `mt` |✔|✔|✔|✔|✔| | Maori | `mi` |✔|✔|✔||| | Marathi | `mr` |✔|✔|✔||| -| Mongolian (Cyrillic) ☼| `mn-Cyrl` |✔||||| -| Mongolian (Traditional) ☼ | `mn-Mong` |✔|||✔|| -| Myanmar ☼ | `my` |✔|✔||✔|| +| Mongolian (Cyrillic) | `mn-Cyrl` |✔||||| +| Mongolian (Traditional) | `mn-Mong` |✔|||✔|| +| Myanmar | `my` |✔|✔||✔|| | Nepali | `ne` |✔|✔|||| | Norwegian | `nb` |✔|✔|✔|✔|✔| -| Odia ☼ | `or` |✔|✔|✔||| +| Odia | `or` |✔|✔|✔||| | Pashto | `ps` |✔|✔||✔|| | Persian | `fa` |✔|✔|✔|✔|✔| | Polish | `pl` |✔|✔|✔|✔|✔| | Portuguese (Brazil) | `pt` |✔|✔|✔|✔|✔| | Portuguese (Portugal) | `pt-pt` 
|✔|✔|||| | Punjabi | `pa` |✔|✔|✔||| -| Queretaro Otomi ☼ | `otq` |✔|✔|||| +| Queretaro Otomi | `otq` |✔|✔|||| | Romanian | `ro` |✔|✔|✔|✔|✔| | Russian | `ru` |✔|✔|✔|✔|✔| | Samoan (Latin) | `sm` |✔| |✔||| @@ -125,13 +123,13 @@ ms.author: lajanuar | Spanish | `es` |✔|✔|✔|✔|✔| | Swahili (Latin) | `sw` |✔|✔|✔|✔|✔| | Swedish | `sv` |✔|✔|✔|✔|✔| -| Tahitian ☼ | `ty` |✔| |✔|✔|| -| Tamil ☼ | `ta` |✔|✔|✔||✔| +| Tahitian | `ty` |✔| |✔|✔|| +| Tamil | `ta` |✔|✔|✔||✔| | Tatar (Latin) | `tt` |✔||||| -| Telugu ☼ | `te` |✔|✔|✔||| -| Thai ☼ | `th` |✔| |✔|✔|✔| -| Tibetan ☼ | `bo` |✔|||| -| Tigrinya ☼ | `ti` |✔|✔|||| +| Telugu | `te` |✔|✔|✔||| +| Thai | `th` |✔| |✔|✔|✔| +| Tibetan | `bo` |✔|||| +| Tigrinya | `ti` |✔|✔|||| | Tongan | `to` |✔|✔|✔||| | Turkish | `tr` |✔|✔|✔|✔|✔| | Turkmen (Latin) | `tk` |✔|||| @@ -140,7 +138,7 @@ ms.author: lajanuar | Urdu | `ur` |✔|✔|✔|✔|✔| | Uyghur (Arabic) | `ug` |✔|||| | Uzbek (Latin | `uz` |✔|||✔|| -| Vietnamese ☼ | `vi` |✔|✔|✔|✔|✔| +| Vietnamese | `vi` |✔|✔|✔|✔|✔| | Welsh | `cy` |✔|✔|✔|✔|✔| | Yucatec Maya | `yua` |✔|✔||✔|| | Zulu | `zu` |✔||||| diff --git a/articles/cognitive-services/cognitive-services-and-machine-learning.md b/articles/cognitive-services/cognitive-services-and-machine-learning.md index 6409fce4c5b7..4b4a00881354 100644 --- a/articles/cognitive-services/cognitive-services-and-machine-learning.md +++ b/articles/cognitive-services/cognitive-services-and-machine-learning.md @@ -10,7 +10,7 @@ ms.date: 10/28/2021 --- # Cognitive Services and machine learning -Cognitive Services provides machine learning capabilities to solve general problems such as analyzing text for emotional sentiment or analyzing images to recognize objects or faces. You don't need special machine learning or data science knowledge to use these services. +Cognitive Services provides machine learning capabilities to solve general problems such as analyzing text for emotional sentiment or analyzing images to recognize objects or faces. You don't need special machine learning or data science knowledge to use these services. [Cognitive Services](./what-are-cognitive-services.md) is a group of services, each supporting different, generalized prediction capabilities. The services are divided into different categories to help you find the right service. @@ -25,9 +25,9 @@ Cognitive Services provides machine learning capabilities to solve general probl Use Cognitive Services when you: * Can use a generalized solution. -* Access solution from a programming REST API or SDK. +* Access solution from a programming REST API or SDK. -Use another machine-learning solution when you: +Use other machine-learning solutions when you: * Need to choose the algorithm and need to train on very specific data. @@ -50,7 +50,7 @@ Both have the end-goal of applying artificial intelligence (AI) to enhance busin Generally, the audiences are different: * Cognitive Services are for developers without machine-learning experience. -* Azure Machine Learning is tailored for data scientists. +* Azure Machine Learning is tailored for data scientists. ## How is a Cognitive Service different from machine learning? 
diff --git a/articles/cognitive-services/cognitive-services-apis-create-account.md b/articles/cognitive-services/cognitive-services-apis-create-account.md index 0a33766c2749..4d546eb665bf 100644 --- a/articles/cognitive-services/cognitive-services-apis-create-account.md +++ b/articles/cognitive-services/cognitive-services-apis-create-account.md @@ -8,7 +8,7 @@ manager: nitinme keywords: cognitive services, cognitive intelligence, cognitive solutions, ai services ms.service: cognitive-services ms.topic: conceptual -ms.date: 03/03/2022 +ms.date: 05/24/2022 ms.author: aahi --- diff --git a/articles/cognitive-services/create-account-bicep.md b/articles/cognitive-services/create-account-bicep.md index f7bdb20fc7b9..cf6bbe30e850 100644 --- a/articles/cognitive-services/create-account-bicep.md +++ b/articles/cognitive-services/create-account-bicep.md @@ -13,18 +13,20 @@ ms.custom: subject-armqs, mode-arm # Quickstart: Create a Cognitive Services resource using Bicep -This quickstart describes how to use Bicep to create Cognitive Services. +Follow this quickstart to create a Cognitive Services resource using Bicep. Azure Cognitive Services are cloud-base services with REST APIs, and client library SDKs available to help developers build cognitive intelligence into applications without having direct artificial intelligence (AI) or data science skills or knowledge. Azure Cognitive Services enables developers to easily add cognitive features into their applications with cognitive solutions that can see, hear, speak, understand, and even begin to reason. -Create a resource using Bicep. This multi-service resource lets you: +[!INCLUDE [About Bicep](../../includes/resource-manager-quickstart-bicep-introduction.md)] + +## Things to consider + +Using Bicep to create a Cognitive Services resource lets you create a multi-service resource. This enables you to: * Access multiple Azure Cognitive Services with a single key and endpoint. * Consolidate billing from the services you use. * [!INCLUDE [terms-azure-portal](./includes/quickstarts/terms-azure-portal.md)] -[!INCLUDE [About Bicep](../../includes/resource-manager-quickstart-bicep-introduction.md)] - ## Prerequisites * If you don't have an Azure subscription, [create one for free](https://azure.microsoft.com/free/cognitive-services). diff --git a/articles/cognitive-services/includes/register-resource-subscription.md b/articles/cognitive-services/includes/register-resource-subscription.md index 5858297b9699..b2d74681446f 100644 --- a/articles/cognitive-services/includes/register-resource-subscription.md +++ b/articles/cognitive-services/includes/register-resource-subscription.md @@ -2,8 +2,8 @@ title: include file description: include file ms.topic: include -ms.date: 07/27/2020 +ms.date: 05/25/2022 --- > [!Tip] -> If your subscription doesn't allow you to create a Cognitive Service resource, you may need to enable that ability of the [Azure resource provider](../../azure-resource-manager/management/resource-providers-and-types.md#azure-portal) with the [Azure portal](../../azure-resource-manager/management/resource-providers-and-types.md#azure-powershell), [PowerShell command](../../azure-resource-manager/management/resource-providers-and-types.md#azure-powershell) or an [Azure CLI command](../../azure-resource-manager/management/resource-providers-and-types.md#azure-cli).
If you are not the subscription owner, ask the _Subscription Owner_ or someone with a role of _admin_ to complete the registration for you or ask for the /register/action privileges granted to your account. \ No newline at end of file +> If your subscription doesn't allow you to create a Cognitive Service resource, you may need to enable that ability of the [Azure resource provider](../../azure-resource-manager/management/resource-providers-and-types.md#register-resource-provider) with the [Azure portal](../../azure-resource-manager/management/resource-providers-and-types.md#azure-portal), [PowerShell command](../../azure-resource-manager/management/resource-providers-and-types.md#azure-powershell) or an [Azure CLI command](../../azure-resource-manager/management/resource-providers-and-types.md#azure-cli). If you are not the subscription owner, ask the _Subscription Owner_ or someone with a role of _admin_ to complete the registration for you or ask for the /register/action privileges granted to your account. \ No newline at end of file diff --git a/articles/cognitive-services/language-service/concepts/encryption-data-at-rest.md b/articles/cognitive-services/language-service/concepts/encryption-data-at-rest.md index 7f994e1d9370..3c87343ec684 100644 --- a/articles/cognitive-services/language-service/concepts/encryption-data-at-rest.md +++ b/articles/cognitive-services/language-service/concepts/encryption-data-at-rest.md @@ -27,7 +27,7 @@ By default, your subscription uses Microsoft-managed encryption keys. There is a There is also an option to manage your subscription with your own keys. Customer-managed keys (CMK), also known as Bring your own key (BYOK), offer greater flexibility to create, rotate, disable, and revoke access controls. You can also audit the encryption keys used to protect your data. -You must use Azure Key Vault to store your customer-managed keys. You can either create your own keys and store them in a key vault, or you can use the Azure Key Vault APIs to generate keys. The Cognitive Services resource and the key vault must be in the same region and in the same Azure Active Directory (Azure AD) tenant, but they can be in different subscriptions. For more information about Azure Key Vault, see [What is Azure Key Vault?](/azure/key-vault/general/overview). +You must use Azure Key Vault to store your customer-managed keys. You can either create your own keys and store them in a key vault, or you can use the Azure Key Vault APIs to generate keys. The Cognitive Services resource and the key vault must be in the same region and in the same Azure Active Directory (Azure AD) tenant, but they can be in different subscriptions. For more information about Azure Key Vault, see [What is Azure Key Vault?](../../../key-vault/general/overview.md). 
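As a quick sketch of the "create your own keys and store them in a key vault" option, the following Azure CLI commands create a key vault and generate an RSA key. The resource names and region are placeholders, and the assumption that purge protection is needed for customer-managed keys should be verified against the CMK guidance for your service:

```console
# Create a key vault in the same region and Azure AD tenant as your Cognitive Services resource.
# Purge protection is assumed to be required for customer-managed keys; verify for your service.
az keyvault create --name <your-key-vault> --resource-group <your-resource-group> --location <your-region> --enable-purge-protection true

# Generate an RSA key to use as the customer-managed key.
az keyvault key create --vault-name <your-key-vault> --name <your-key-name> --kty RSA --size 2048
```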
### Customer-managed keys for Language services diff --git a/articles/cognitive-services/language-service/concepts/model-lifecycle.md b/articles/cognitive-services/language-service/concepts/model-lifecycle.md index 9a36e59fc920..6e7725b18baa 100644 --- a/articles/cognitive-services/language-service/concepts/model-lifecycle.md +++ b/articles/cognitive-services/language-service/concepts/model-lifecycle.md @@ -9,7 +9,7 @@ ms.service: cognitive-services ms.subservice: language-service ms.custom: event-tier1-build-2022 ms.topic: conceptual -ms.date: 05/09/2022 +ms.date: 05/25/2022 ms.author: aahi --- @@ -61,8 +61,8 @@ Use the table below to find which model versions are supported by each feature: | Question answering | `2021-10-01` | `2021-10-01` | | | Text Analytics for health | `2021-05-15`, `2022-03-01` | `2022-03-01` | | | Key phrase extraction | `2021-06-01` | `2021-06-01` | | -| Text summarization | `2021-08-01` | `2021-08-01` | | - +| Document summarization (preview) | `2021-08-01` | | `2021-08-01` | +| Conversation summarization (preview) | `2022-05-15-preview` | | `2022-05-15-preview` | ## Custom features diff --git a/articles/cognitive-services/language-service/conversational-language-understanding/faq.md b/articles/cognitive-services/language-service/conversational-language-understanding/faq.md index fc80815c1190..c4fdbe54b318 100644 --- a/articles/cognitive-services/language-service/conversational-language-understanding/faq.md +++ b/articles/cognitive-services/language-service/conversational-language-understanding/faq.md @@ -21,9 +21,14 @@ Use this article to quickly get the answers to common questions about conversati See the [quickstart](./quickstart.md) to quickly create your first project, or the [how-to article](./how-to/create-project.md) for more details. -## How do I connect conversation language projects to other service applications? -See the [orchestration workflow documentation](../orchestration-workflow/overview.md) for more information. +## Can I use more than one conversational language understanding project together? + +Yes, using orchestration workflow. See the [orchestration workflow documentation](../orchestration-workflow/overview.md) for more information. + +## What is the difference between LUIS and conversational language understanding? + +Conversational language understanding is the next generation of LUIS. ## Training is taking a long time, is this expected? @@ -61,13 +66,27 @@ Yes, you can [import any LUIS application](./concepts/backwards-compatibility.md No, the service only supports JSON format. You can go to LUIS, import the `.LU` file and export it as a JSON file. +## Can I use conversational language understanding with custom question answering? + +Yes, you can use [orchestration workflow](../orchestration-workflow/overview.md) to orchestrate between different conversational language understanding and [question answering](../question-answering/overview.md) projects. Start by creating orchestration workflow projects, then connect your conversational language understanding and custom question answering projects. To perform this action, make sure that your projects are under the same Language resource. + +## How do I handle out of scope or domain utterances that aren't relevant to my intents? + +Add any out of scope utterances to the [none intent](./concepts/none-intent.md). + ## Is there any SDK support? -Yes, only for predictions, and [samples are available](https://aka.ms/cluSampleCode). There is currently no authoring support for the SDK. 
+Yes, only for predictions, and samples are available for [Python](https://aka.ms/sdk-samples-conversation-python) and [C#](https://aka.ms/sdk-sample-conversation-dot-net). There is currently no authoring support for the SDK. + +## What are the training modes? + -## Can I connect to Orchestration workflow projects? +|Training mode | Description | Language availability | Pricing | +|---------|---------|---------|---------| +|Standard training | Faster training times for quicker model iteration. | Can only train projects in English. | Included in your [pricing tier](https://azure.microsoft.com/pricing/details/cognitive-services/language-service/). | +|Advanced training | Slower training times using fine-tuned neural network transformer models. | Can train [multilingual projects](language-support.md#multi-lingual-option). | May incur [additional charges](https://azure.microsoft.com/pricing/details/cognitive-services/language-service/). -Yes, you can connect your CLU project in orchestration workflow. All you need is to make sure that both projects are under the same Language resource +See [training modes](how-to/train-model.md#training-modes) for more information. ## Are there APIs for this feature? diff --git a/articles/cognitive-services/language-service/conversational-language-understanding/glossary.md b/articles/cognitive-services/language-service/conversational-language-understanding/glossary.md index b25fff1aeff4..041145de67da 100644 --- a/articles/cognitive-services/language-service/conversational-language-understanding/glossary.md +++ b/articles/cognitive-services/language-service/conversational-language-understanding/glossary.md @@ -21,7 +21,7 @@ Use this article to learn about some of the definitions and terms you may encoun Entities are words in utterances that describe information used to fulfill or identify an intent. If your entity is complex and you would like your model to identify specific parts, you can break your model into subentities. For example, you might want your model to predict an address, but also the subentities of street, city, state, and zipcode. ## F1 score -The F1 score is a function of Precision and Recall. It's needed when you seek a balance between [precision](#precision) and [recall](#recall]. +The F1 score is a function of Precision and Recall. It's needed when you seek a balance between [precision](#precision) and [recall](#recall). ## Intent An intent represents a task or action the user wants to perform. It's a purpose or goal expressed in a user's input, such as booking a flight, or paying a bill. diff --git a/articles/cognitive-services/language-service/conversational-language-understanding/how-to/build-schema.md b/articles/cognitive-services/language-service/conversational-language-understanding/how-to/build-schema.md index 8cd146049f46..71862d7376fc 100644 --- a/articles/cognitive-services/language-service/conversational-language-understanding/how-to/build-schema.md +++ b/articles/cognitive-services/language-service/conversational-language-understanding/how-to/build-schema.md @@ -43,7 +43,7 @@ They might create an intent to represent each of these actions. They might also To build a project schema within [Language Studio](https://aka.ms/languageStudio): -1. Select **Build schema** from the left side menu. +1. Select **Schema definition** from the left side menu. 2. From the top pivots, you can change the view to be **Intents** or **Entities**. 
diff --git a/articles/cognitive-services/language-service/conversational-language-understanding/how-to/tag-utterances.md b/articles/cognitive-services/language-service/conversational-language-understanding/how-to/tag-utterances.md index 0262e5405ddd..e59e569f9313 100644 --- a/articles/cognitive-services/language-service/conversational-language-understanding/how-to/tag-utterances.md +++ b/articles/cognitive-services/language-service/conversational-language-understanding/how-to/tag-utterances.md @@ -91,7 +91,7 @@ Use the following steps to label your utterances: > [!NOTE] - > list and prebuilt components are not shown in the tag utterances page, and all labels here only apply to the **learned component**. + > list and prebuilt components are not shown in the data labeling page, and all labels here only apply to the **learned component**. To remove a label: 1. From within your utterance, select the entity you want to remove a label from. diff --git a/articles/cognitive-services/language-service/conversational-language-understanding/how-to/view-model-evaluation.md b/articles/cognitive-services/language-service/conversational-language-understanding/how-to/view-model-evaluation.md index 6a1978f8a2d9..e14168fd4890 100644 --- a/articles/cognitive-services/language-service/conversational-language-understanding/how-to/view-model-evaluation.md +++ b/articles/cognitive-services/language-service/conversational-language-understanding/how-to/view-model-evaluation.md @@ -33,6 +33,9 @@ See the [project development lifecycle](../overview.md#project-development-lifec ### [Language studio](#tab/Language-studio) +> [!Note] +> The results here are for the machine learning entity component only. + In the **view model details** page, you'll be able to see all your models, with their current training status, and the date they were last trained. [!INCLUDE [Model performance](../includes/language-studio/model-performance.md)] diff --git a/articles/cognitive-services/language-service/conversational-language-understanding/includes/language-studio/delete-model.md b/articles/cognitive-services/language-service/conversational-language-understanding/includes/language-studio/delete-model.md index 5d36ed8b5105..a86367f82a33 100644 --- a/articles/cognitive-services/language-service/conversational-language-understanding/includes/language-studio/delete-model.md +++ b/articles/cognitive-services/language-service/conversational-language-understanding/includes/language-studio/delete-model.md @@ -12,7 +12,7 @@ ms.author: aahi To delete your model from within the [Language Studio](https://aka.ms/LanguageStudio): -1. Select **View model details** from the left side menu. +1. Select **Model performance** from the left side menu. 2. Click on the **model name** you want to delete and click **Delete** from the top menu. diff --git a/articles/cognitive-services/language-service/conversational-language-understanding/includes/language-studio/deploy-model.md b/articles/cognitive-services/language-service/conversational-language-understanding/includes/language-studio/deploy-model.md index 9e8961590132..4c3f82d6ea90 100644 --- a/articles/cognitive-services/language-service/conversational-language-understanding/includes/language-studio/deploy-model.md +++ b/articles/cognitive-services/language-service/conversational-language-understanding/includes/language-studio/deploy-model.md @@ -12,9 +12,9 @@ ms.author: aahi To deploy your model from within the [Language Studio](https://aka.ms/LanguageStudio): -1. 
Select **Deploy model** from the left side menu. +1. Select **Deploying a model** from the left side menu. -2. Click on **Start deployment job** to start a new deployment job. +2. Click on **Add deployment** to start a new deployment job. :::image type="content" source="../../media/add-deployment-model.png" alt-text="A screenshot showing the model deployment button in Language Studio." lightbox="../../media/add-deployment-model.png"::: diff --git a/articles/cognitive-services/language-service/conversational-language-understanding/includes/language-studio/get-prediction-url.md b/articles/cognitive-services/language-service/conversational-language-understanding/includes/language-studio/get-prediction-url.md index fae7a0e0ee19..18f0ed8fa0a4 100644 --- a/articles/cognitive-services/language-service/conversational-language-understanding/includes/language-studio/get-prediction-url.md +++ b/articles/cognitive-services/language-service/conversational-language-understanding/includes/language-studio/get-prediction-url.md @@ -15,12 +15,10 @@ ms.custom: language-service-clu 1. After the deployment job is completed successfully, select the deployment you want to use and from the top menu click on **Get prediction URL**. - + :::image type="content" source="../../media/prediction-url.png" alt-text="A screenshot showing the prediction URL in Language Studio." lightbox="../../media/prediction-url.png"::: 2. In the window that appears, copy the sample request URL and body into your command line. 3. Replace `` with the actual text you want to send to extract intents and entities from. - - 4. Submit the `POST` cURL request in your terminal or command prompt. You'll receive a 202 response with the API results if the request was successful. diff --git a/articles/cognitive-services/language-service/conversational-language-understanding/includes/language-studio/import-project.md b/articles/cognitive-services/language-service/conversational-language-understanding/includes/language-studio/import-project.md index fb9c689e72da..4f739377d997 100644 --- a/articles/cognitive-services/language-service/conversational-language-understanding/includes/language-studio/import-project.md +++ b/articles/cognitive-services/language-service/conversational-language-understanding/includes/language-studio/import-project.md @@ -20,5 +20,5 @@ ms.author: aahi :::image type="content" source="../../media/projects-page.png" alt-text="A screenshot showing the conversation project page in Language Studio." lightbox="../../media/projects-page.png"::: -3. In the window that appears, upload the JSON file you want to import. Make sure that your file follows the [supported JSON format](). +3. In the window that appears, upload the JSON file you want to import. Make sure that your file follows the [supported JSON format](../../concepts/data-formats.md). diff --git a/articles/cognitive-services/language-service/conversational-language-understanding/includes/language-studio/model-performance.md b/articles/cognitive-services/language-service/conversational-language-understanding/includes/language-studio/model-performance.md index 4f0db66ccbb1..443463af7d89 100644 --- a/articles/cognitive-services/language-service/conversational-language-understanding/includes/language-studio/model-performance.md +++ b/articles/cognitive-services/language-service/conversational-language-understanding/includes/language-studio/model-performance.md @@ -11,7 +11,7 @@ ms.author: aahi 1. Go to your project page in [Language Studio](https://aka.ms/languageStudio). -2. 
Select **View model details** from the menu on the left side of the screen. +2. Select **Model performance** from the menu on the left side of the screen. 3. In this page you can only view the successfully trained models, F1 score of each model and [model expiration date](../../../concepts/model-lifecycle.md#expiration-timeline). You can click on the model name for more details about its performance. @@ -23,4 +23,4 @@ ms.author: aahi > [!NOTE] > If you don't see any of the intents or entities you have in your model displayed here, it is because they weren't in any of the utterances that were used for the test set. - \ No newline at end of file + diff --git a/articles/cognitive-services/language-service/conversational-language-understanding/includes/language-studio/train-model.md b/articles/cognitive-services/language-service/conversational-language-understanding/includes/language-studio/train-model.md index c45270a6c593..ec5ee03d9aca 100644 --- a/articles/cognitive-services/language-service/conversational-language-understanding/includes/language-studio/train-model.md +++ b/articles/cognitive-services/language-service/conversational-language-understanding/includes/language-studio/train-model.md @@ -24,9 +24,9 @@ To start training your model from within the [Language Studio](https://aka.ms/la 6. Click on the **Train** button. -5. Click on the Training Job ID from the list, a side pane will appear where you can check **Training progress** and **Job status** and other details for this job. + :::image type="content" source="../../media/train-model.png" alt-text="A screenshot showing the training page in Language Studio." lightbox="../../media/train-model.png"::: - +5. Click on the Training Job ID from the list, a side pane will appear where you can check **Training progress** and **Job status** and other details for this job. > [!NOTE] > * Only successfully completed training jobs will generate models. diff --git a/articles/cognitive-services/language-service/conversational-language-understanding/includes/quickstarts/language-studio.md b/articles/cognitive-services/language-service/conversational-language-understanding/includes/quickstarts/language-studio.md index 7d3ad3de8f5d..cdf02d0f4ba3 100644 --- a/articles/cognitive-services/language-service/conversational-language-understanding/includes/quickstarts/language-studio.md +++ b/articles/cognitive-services/language-service/conversational-language-understanding/includes/quickstarts/language-studio.md @@ -22,15 +22,15 @@ ms.custom: ignite-fall-2021 Once you have a Language resource created, create a conversational language understanding project. A project is a work area for building your custom ML models based on your data. Your project can only be accessed by you and others who have access to the Language resource being used. -For this quickstart, you can download [this sample project]() and import it. This project can predict the intended commands from user input, such as: reading emails, deleting emails, and attaching a document to an email. +For this quickstart, you can download [this sample project](https://go.microsoft.com/fwlink/?linkid=2196152) and import it. This project can predict the intended commands from user input, such as: reading emails, deleting emails, and attaching a document to an email. [!INCLUDE [Import project](../language-studio/import-project.md)] -Once the upload is complete, you will land on **Build schema** page. 
For this quickstart, the schema is already built, and utterances are already tagged with intents and entities. +Once the upload is complete, you will land on **Build schema** page. For this quickstart, the schema is already built, and utterances are already labeled with intents and entities. ## Train your model -Typically, after you create a project, you should [build schema]() and [tag utterances](). For this quickstart, we already imported a ready project with built schema and tagged utterances. +Typically, after you create a project, you should [build schema](../../how-to/build-schema.md) and [label utterances](../../how-to/tag-utterances.md). For this quickstart, we already imported a ready project with built schema and labeled utterances. To train a model, you need to start a training job. The output of a successful training job is your trained model. diff --git a/articles/cognitive-services/language-service/conversational-language-understanding/includes/quickstarts/rest-api.md b/articles/cognitive-services/language-service/conversational-language-understanding/includes/quickstarts/rest-api.md index 30de64140aa9..4dbf048a91f5 100644 --- a/articles/cognitive-services/language-service/conversational-language-understanding/includes/quickstarts/rest-api.md +++ b/articles/cognitive-services/language-service/conversational-language-understanding/includes/quickstarts/rest-api.md @@ -14,8 +14,6 @@ ms.custom: ignite-fall-2021 * Azure subscription - [Create one for free](https://azure.microsoft.com/free/cognitive-services). -## Create a Language resource from Azure portal - [!INCLUDE [create a new resource from the Azure portal](../resource-creation-azure-portal.md)] ## Get your resource keys and endpoint diff --git a/articles/cognitive-services/language-service/conversational-language-understanding/includes/rest-api/cancel-training.md b/articles/cognitive-services/language-service/conversational-language-understanding/includes/rest-api/cancel-training.md index aedf441bdf3e..993074985b05 100644 --- a/articles/cognitive-services/language-service/conversational-language-understanding/includes/rest-api/cancel-training.md +++ b/articles/cognitive-services/language-service/conversational-language-understanding/includes/rest-api/cancel-training.md @@ -16,15 +16,15 @@ Create a **POST** request using the following URL, headers, and JSON body to can Use the following URL when creating your API request. Replace the placeholder values below with your own values. ```rest -{ENDPOINT}/language/analyze-conversations/projects/{PROJECT-NAME}/train/jobs/{JOB-ID}/:cancel?api-version={API-VERSION} +{ENDPOINT}/language/authoring/analyze-conversations/projects/{PROJECT-NAME}/train/jobs/{JOB-ID}/:cancel?api-version={API-VERSION} ``` |Placeholder |Value | Example | |---------|---------|---------| |`{ENDPOINT}` | The endpoint for authenticating your API request. | `https://.cognitiveservices.azure.com` | |`{PROJECT-NAME}` | The name for your project. This value is case-sensitive. | `EmailApp` | -|`{JOB-ID} | This is the training job ID| |`XXXXX-XXXXX-XXXX-XX| -|`{API-VERSION}` | The version of the API you are calling. The value referenced here is for the latest version released. Learn more about other available [API versions](../../../concepts/model-lifecycle.md#choose-the-model-version-used-on-your-data) | `2022-03-01-preview` | +|`{JOB-ID}` | This is the training job ID. |`XXXXX-XXXXX-XXXX-XX`| +|`{API-VERSION}` | The version of the API you are calling. The value referenced here is for the latest version released. 
Learn more about other available [API versions](../../../concepts/model-lifecycle.md#choose-the-model-version-used-on-your-data) | `2022-05-01` | ### Headers @@ -34,5 +34,5 @@ Use the following header to authenticate your request. |--|--| |`Ocp-Apim-Subscription-Key`| The key to your resource. Used for authenticating your API requests.| -Once you send your API request, you will receive a 204 response indicating success, which means your training job has been canceled. +Once you send your API request, you will receive a 202 response indicating success, which means your training job has been canceled. A successful call results with an `Operation-Location` header used to check the status of the job. diff --git a/articles/cognitive-services/language-service/conversational-language-understanding/includes/rest-api/create-project.md b/articles/cognitive-services/language-service/conversational-language-understanding/includes/rest-api/create-project.md index 7e4e7520fffd..b0436f1cf253 100644 --- a/articles/cognitive-services/language-service/conversational-language-understanding/includes/rest-api/create-project.md +++ b/articles/cognitive-services/language-service/conversational-language-understanding/includes/rest-api/create-project.md @@ -23,7 +23,7 @@ Use the following URL when creating your API request. Replace the placeholder va |---------|---------|---------| |`{ENDPOINT}` | The endpoint for authenticating your API request. | `https://.cognitiveservices.azure.com` | |`{PROJECT-NAME}` | The name for your project. This value is case-sensitive. | `myProject` | -|`{API-VERSION}` | The version of the API you are calling. The value referenced here is for the latest version released. Learn more about other available [API versions](../../../concepts/model-lifecycle.md#choose-the-model-version-used-on-your-data) | `2022-03-01-preview` | +|`{API-VERSION}` | The version of the API you are calling. The value referenced here is for the latest version released. Learn more about other available [API versions](../../../concepts/model-lifecycle.md#choose-the-model-version-used-on-your-data) | `2022-05-01` | ### Headers @@ -39,14 +39,11 @@ Use the following sample JSON as your body. ```json { - "projectKind": "conversation", - "settings": { - "confidenceThreshold": 0 - }, "projectName": "{PROJECT-NAME}", - "multilingual": true, + "language": "{LANGUAGE-CODE}", + "projectKind": "Conversation", "description": "Project description", - "language": "{LANGUAGE-CODE}" + "multilingual": true } ``` diff --git a/articles/cognitive-services/language-service/conversational-language-understanding/includes/rest-api/delete-deployment.md b/articles/cognitive-services/language-service/conversational-language-understanding/includes/rest-api/delete-deployment.md index a04b0b40850b..cb1c2f7b5b12 100644 --- a/articles/cognitive-services/language-service/conversational-language-understanding/includes/rest-api/delete-deployment.md +++ b/articles/cognitive-services/language-service/conversational-language-understanding/includes/rest-api/delete-deployment.md @@ -9,15 +9,13 @@ ms.date: 05/16/2022 ms.author: aahi --- - - Create a **DELETE** request using the following URL, headers, and JSON body to delete a conversational language understanding deployment. 
### Request URL ```rest -{ENDPOINT}/language/analyze-conversations/projects/{projectName}/deployments/{deploymentName}?api-version={API-VERSION} +{ENDPOINT}/language/authoring/analyze-conversations/projects/{projectName}/deployments/{deploymentName}?api-version={API-VERSION} ``` |Placeholder |Value | Example | @@ -25,7 +23,7 @@ Create a **DELETE** request using the following URL, headers, and JSON body to d |`{ENDPOINT}` | The endpoint for authenticating your API request. | `https://.cognitiveservices.azure.com` | |`{PROJECT-NAME}` | The name for your project. This value is case-sensitive. | `myProject` | |`{DEPLOYMENT-NAME}` | The name for your deployment name. This value is case-sensitive. | `staging` | -|`{API-VERSION}` | The version of the API you are calling. The value referenced here is for the latest released [model version](../../../concepts/model-lifecycle.md#choose-the-model-version-used-on-your-data). | `2022-03-01-preview` | +|`{API-VERSION}` | The version of the API you are calling. The value referenced here is for the latest released [model version](../../../concepts/model-lifecycle.md#choose-the-model-version-used-on-your-data). | `2022-05-01` | ### Headers diff --git a/articles/cognitive-services/language-service/conversational-language-understanding/includes/rest-api/delete-model.md b/articles/cognitive-services/language-service/conversational-language-understanding/includes/rest-api/delete-model.md index 7f576ca3e5bd..e2b6d98981c2 100644 --- a/articles/cognitive-services/language-service/conversational-language-understanding/includes/rest-api/delete-model.md +++ b/articles/cognitive-services/language-service/conversational-language-understanding/includes/rest-api/delete-model.md @@ -17,7 +17,7 @@ Create a **DELETE** request using the following URL, headers, and JSON body to d ### Request URL ```rest -{ENDPOINT}/language/analyze-conversations/projects/{projectName}/models/{trainedModelLabel}?api-version={API-VERSION} +{ENDPOINT}/language/authoring/analyze-conversations/projects/{projectName}/models/{trainedModelLabel}?api-version={API-VERSION} ``` |Placeholder |Value | Example | @@ -25,7 +25,7 @@ Create a **DELETE** request using the following URL, headers, and JSON body to d |`{YOUR-ENDPOINT}` | The endpoint for authenticating your API request. | `https://.cognitiveservices.azure.com` | |`{PROJECT-NAME}` | The name for your project. This value is case-sensitive. | `myProject` | |`{trainedModelLabel}` | The name for your model name. This value is case-sensitive. | `model1` | -|`{API-VERSION}` | The version of the API you are calling. The value referenced here is for the latest released [model version](../../../concepts/model-lifecycle.md#choose-the-model-version-used-on-your-data). | `2022-03-01-preview` | +|`{API-VERSION}` | The version of the API you are calling. The value referenced here is for the latest released [model version](../../../concepts/model-lifecycle.md#choose-the-model-version-used-on-your-data). | `2022-05-01` | ### Headers @@ -36,4 +36,4 @@ Use the following header to authenticate your request. |`Ocp-Apim-Subscription-Key`| The key to your resource. Used for authenticating your API requests.| -Once you send your API request, you will receive a `202` response indicating success, which means your model has been deleted. +Once you send your API request, you will receive a `204` response indicating success, which means your model has been deleted. 
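For reference, the DELETE request described above can also be sent with cURL. This is only a sketch that reuses the placeholders from the table; substitute your own endpoint, project name, trained model label, API version, and resource key:

```console
# Sketch only: replace every placeholder with your own values.
curl -X DELETE "{ENDPOINT}/language/authoring/analyze-conversations/projects/{projectName}/models/{trainedModelLabel}?api-version=2022-05-01" \
  -H "Ocp-Apim-Subscription-Key: {YOUR-RESOURCE-KEY}" \
  -i
```

A `204` status code in the response indicates that the model was deleted.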
diff --git a/articles/cognitive-services/language-service/conversational-language-understanding/includes/rest-api/delete-project.md b/articles/cognitive-services/language-service/conversational-language-understanding/includes/rest-api/delete-project.md index 34a8bb73cc9a..a41c4f9e5ca9 100644 --- a/articles/cognitive-services/language-service/conversational-language-understanding/includes/rest-api/delete-project.md +++ b/articles/cognitive-services/language-service/conversational-language-understanding/includes/rest-api/delete-project.md @@ -22,7 +22,7 @@ Create a **DELETE** request using the following URL, headers, and JSON body to d |---------|---------|---------| |`{ENDPOINT}` | The endpoint for authenticating your API request. | `https://.cognitiveservices.azure.com` | |`{PROJECT-NAME}` | The name for your project. This value is case-sensitive. | `myProject` | -|`{API-VERSION}` | The version of the API you are calling. The value referenced here is for the latest released [model version](../../../concepts/model-lifecycle.md#choose-the-model-version-used-on-your-data). | `2022-03-01-preview` | +|`{API-VERSION}` | The version of the API you are calling. The value referenced here is for the latest released [model version](../../../concepts/model-lifecycle.md#choose-the-model-version-used-on-your-data). | `2022-05-01` | ### Headers diff --git a/articles/cognitive-services/language-service/conversational-language-understanding/includes/rest-api/deploy-model.md b/articles/cognitive-services/language-service/conversational-language-understanding/includes/rest-api/deploy-model.md index a5fb5835bb12..b2f77f3eb398 100644 --- a/articles/cognitive-services/language-service/conversational-language-understanding/includes/rest-api/deploy-model.md +++ b/articles/cognitive-services/language-service/conversational-language-understanding/includes/rest-api/deploy-model.md @@ -15,7 +15,7 @@ Create a **PUT** request using the following URL, headers, and JSON body to star #### Request URL ```rest -{ENDPOINT}/language/analyze-conversations/projects/{PROJECT-NAME}/deployments/{DEPLOYMENT-NAME}?api-version={API-VERSION} +{ENDPOINT}/language/authoring/analyze-conversations/projects/{PROJECT-NAME}/deployments/{DEPLOYMENT-NAME}?api-version={API-VERSION} ``` |Placeholder |Value | Example | @@ -23,7 +23,7 @@ Create a **PUT** request using the following URL, headers, and JSON body to star |`{ENDPOINT}` | The endpoint for authenticating your API request. | `https://.cognitiveservices.azure.com` | |`{PROJECT-NAME}` | The name for your project. This value is case-sensitive. | `myProject` | |`{DEPLOYMENT-NAME}` | The name for your deployment. This value is case-sensitive. | `staging` | -|`{API-VERSION}` | The version of the API you are calling. The value referenced here is for the latest released [model version](../../../concepts/model-lifecycle.md#choose-the-model-version-used-on-your-data). | `2022-03-01-preview` | +|`{API-VERSION}` | The version of the API you are calling. The value referenced here is for the latest released [model version](../../../concepts/model-lifecycle.md#choose-the-model-version-used-on-your-data). | `2022-05-01` | #### Headers @@ -49,7 +49,7 @@ Use the following header to authenticate your request. Once you send your API request, you will receive a `202` response indicating success. In the response headers, extract the `operation-location` value. 
It will be formatted like this: ```rest -{ENDPOINT}/language/analyze-conversations/projects/{PROJECT-NAME}/deployments/{DEPLOYMENT-NAME}/jobs/{JOB-ID}?api-version={API-VERSION} +{ENDPOINT}/language/authoring/analyze-conversations/projects/{PROJECT-NAME}/deployments/{DEPLOYMENT-NAME}/jobs/{JOB-ID}?api-version={API-VERSION} ``` You can use this URL to get the deployment job status. diff --git a/articles/cognitive-services/language-service/conversational-language-understanding/includes/rest-api/export-project.md b/articles/cognitive-services/language-service/conversational-language-understanding/includes/rest-api/export-project.md index 8040ddc127a9..c84c384e3872 100644 --- a/articles/cognitive-services/language-service/conversational-language-understanding/includes/rest-api/export-project.md +++ b/articles/cognitive-services/language-service/conversational-language-understanding/includes/rest-api/export-project.md @@ -16,14 +16,14 @@ Create a **POST** request using the following URL, headers, and JSON body to exp Use the following URL when creating your API request. Replace the placeholder values below with your own values. ```rest -{ENDPOINT}/language/authoring/analyze-text/projects/{PROJECT-NAME}/:export?api-version={API-VERSION} +{ENDPOINT}/language/authoring/analyze-conversations/projects/{PROJECT-NAME}/:export?stringIndexType=Utf16CodeUnit&api-version={API-VERSION} ``` |Placeholder |Value | Example | |---------|---------|---------| |`{ENDPOINT}` | The endpoint for authenticating your API request. | `https://.cognitiveservices.azure.com` | |`{PROJECT-NAME}` | The name for your project. This value is case-sensitive. | `EmailApp` | -|`{API-VERSION}` | The version of the API you are calling. The value referenced here is for the latest released [model version](../../../concepts/model-lifecycle.md#choose-the-model-version-used-on-your-data) released. | `2022-03-01-preview` | +|`{API-VERSION}` | The version of the API you are calling. The value referenced here is for the latest released [model version](../../../concepts/model-lifecycle.md#choose-the-model-version-used-on-your-data) released. | `2022-05-01` | ### Headers @@ -37,7 +37,7 @@ Use the following header to authenticate your request. Once you send your API request, you will receive a `202` response indicating success. In the response headers, extract the `operation-location` value. It will be formatted like this: ```rest -{ENDPOINT}/language/analyze-conversations/projects/{PROJECT-NAME}/jobs/{JOB-ID}?api-version={API-VERSION} +{ENDPOINT}/language/authoring/analyze-conversations/projects/{PROJECT-NAME}/jobs/{JOB-ID}?api-version={API-VERSION} ``` `JOB-ID` is used to identify your request, since this operation is asynchronous. Use this URL to get the exported project JSON, using the same authentication method. diff --git a/articles/cognitive-services/language-service/conversational-language-understanding/includes/rest-api/get-deployment-status.md b/articles/cognitive-services/language-service/conversational-language-understanding/includes/rest-api/get-deployment-status.md index 0af1708e93f5..f722f463c784 100644 --- a/articles/cognitive-services/language-service/conversational-language-understanding/includes/rest-api/get-deployment-status.md +++ b/articles/cognitive-services/language-service/conversational-language-understanding/includes/rest-api/get-deployment-status.md @@ -14,7 +14,7 @@ Use the following **GET** request to get the status of your deployment job. 
Repl ### Request URL ```rest -{ENDPOINT}/language/analyze-conversations/projects/{PROJECT-NAME}/deployments/{DEPLOYMENT-NAME}/jobs/{JOB-ID}?api-version={API-VERSION} +{ENDPOINT}/language/authoring/analyze-conversations/projects/{PROJECT-NAME}/deployments/{DEPLOYMENT-NAME}/jobs/{JOB-ID}?api-version={API-VERSION} ``` |Placeholder |Value | Example | @@ -23,7 +23,7 @@ Use the following **GET** request to get the status of your deployment job. Repl |`{PROJECT-NAME}` | The name for your project. This value is case-sensitive. | `myProject` | |`{DEPLOYMENT-NAME}` | The name for your deployment. This value is case-sensitive. | `staging` | |`{JOB-ID}` | The ID for locating your model's training status. This is in the `location` header value you received from the API in response to your model deployment request. | `xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxxx` | -|`{API-VERSION}` | The version of the API you are calling. The value referenced here is for the latest released [model version](../../../concepts/model-lifecycle.md#choose-the-model-version-used-on-your-data). | `2022-03-01-preview` | +|`{API-VERSION}` | The version of the API you are calling. The value referenced here is for the latest released [model version](../../../concepts/model-lifecycle.md#choose-the-model-version-used-on-your-data). | `2022-05-01` | ### Headers diff --git a/articles/cognitive-services/language-service/conversational-language-understanding/includes/rest-api/get-export-status.md b/articles/cognitive-services/language-service/conversational-language-understanding/includes/rest-api/get-export-status.md index 12ce15fd89fe..91c530af0326 100644 --- a/articles/cognitive-services/language-service/conversational-language-understanding/includes/rest-api/get-export-status.md +++ b/articles/cognitive-services/language-service/conversational-language-understanding/includes/rest-api/get-export-status.md @@ -12,7 +12,7 @@ ms.author: aahi Use the following **GET** request to query the status of your export job. You can use the URL you received from the previous step, or replace the placeholder values below with your own values. ```rest -{ENDPOINT}/language/analyze-conversations/projects/{PROJECT-NAME}/export/jobs/{JOB-ID}?api-version={API-VERSION} +{ENDPOINT}/language/authoring/analyze-conversations/projects/{PROJECT-NAME}/export/jobs/{JOB-ID}?api-version={API-VERSION} ``` |Placeholder |Value | Example | @@ -20,7 +20,7 @@ Use the following **GET** request to query the status of your export job. You ca |`{ENDPOINT}` | The endpoint for authenticating your API request. | `https://.cognitiveservices.azure.com` | |`{PROJECT-NAME}` | The name for your project. This value is case-sensitive. | `myProject` | |`{JOB-ID}` | The ID for locating your export job status. This is in the `location` header value you received in the previous step. | `xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxxx` | -|`{API-VERSION}` | The version of the API you are calling. The value referenced here is for the latest released [model version](../../../concepts/model-lifecycle.md#choose-the-model-version-used-on-your-data). | `2022-03-01-preview` | +|`{API-VERSION}` | The version of the API you are calling. The value referenced here is for the latest released [model version](../../../concepts/model-lifecycle.md#choose-the-model-version-used-on-your-data). | `2022-05-01` | #### Headers @@ -34,18 +34,12 @@ Use the following header to authenticate your request. 
```json { - "resultUrl": "{RESULT-URL}", - "jobId": "string", - "createdDateTime": "2021-10-19T23:24:41.572Z", - "lastUpdatedDateTime": "2021-10-19T23:24:41.572Z", - "expirationDateTime": "2021-10-19T23:24:41.572Z", - "status": "unknown", - "errors": [ - { - "code": "unknown", - "message": "string" - } - ] + "resultUrl": "{Endpoint}/language/authoring/analyze-conversations/projects/{PROJECT-NAME}/export/jobs/xxxxxx-xxxxx-xxxxx-xx/result?api-version={API-VERSION}", + "jobId": "xxxx-xxxxx-xxxxx-xxx", + "createdDateTime": "2022-04-18T15:23:07Z", + "lastUpdatedDateTime": "2022-04-18T15:23:08Z", + "expirationDateTime": "2022-04-25T15:23:07Z", + "status": "succeeded" } ``` diff --git a/articles/cognitive-services/language-service/conversational-language-understanding/includes/rest-api/get-import-status.md b/articles/cognitive-services/language-service/conversational-language-understanding/includes/rest-api/get-import-status.md index 842e32d705d0..e9bd93f4cde8 100644 --- a/articles/cognitive-services/language-service/conversational-language-understanding/includes/rest-api/get-import-status.md +++ b/articles/cognitive-services/language-service/conversational-language-understanding/includes/rest-api/get-import-status.md @@ -14,7 +14,7 @@ ms.custom: language-service-clu Use the following **GET** request to query the status of your import job. You can use the URL you received from the previous step, or replace the placeholder values below with your own values. ```rest -{ENDPOINT}/language/analyze-conversations/projects/{PROJECT-NAME}/export/jobs/{JOB-ID}?api-version={API-VERSION} +{ENDPOINT}/language/authoring/analyze-conversations/projects/{PROJECT-NAME}/import/jobs/{JOB-ID}?api-version={API-VERSION} ``` |Placeholder |Value | Example | @@ -22,7 +22,7 @@ Use the following **GET** request to query the status of your import job. You ca |`{ENDPOINT}` | The endpoint for authenticating your API request. | `https://.cognitiveservices.azure.com` | |`{PROJECT-NAME}` | The name for your project. This value is case-sensitive. | `myProject` | |`{JOB-ID}` | The ID for locating your export job status. This is in the `location` header value you received in the previous step. | `xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxxx` | -|`{API-VERSION}` | The version of the API you are calling. The value referenced here is for the latest released [model version](../../../concepts/model-lifecycle.md#choose-the-model-version-used-on-your-data). | `2022-03-01-preview` | +|`{API-VERSION}` | The version of the API you are calling. The value referenced here is for the latest released [model version](../../../concepts/model-lifecycle.md#choose-the-model-version-used-on-your-data). | `2022-05-01` | #### Headers @@ -39,31 +39,11 @@ Once you send the request, you will get the following response. 
Keep polling thi ```json { - "jobId": "string", - "createdDateTime": "2022-04-25T10:54:07.950Z", - "lastUpdatedDateTime": "2022-04-25T10:54:07.950Z", - "expirationDateTime": "2022-04-25T10:54:07.950Z", - "status": "unknown", - "warnings": [ - { - "code": "InvalidRequest", - "message": "string", - "target": "string", - "details": [ - "string" - ] - } - ], - "errors": [ - { - "code": "InvalidRequest", - "message": "string", - "target": "string", - "details": [ - "string" - ] - } - ] + "jobId": "xxxxx-xxxxx-xxxx-xxxxx", + "createdDateTime": "2022-04-18T15:17:20Z", + "lastUpdatedDateTime": "2022-04-18T15:17:22Z", + "expirationDateTime": "2022-04-25T15:17:20Z", + "status": "succeeded" } ``` diff --git a/articles/cognitive-services/language-service/conversational-language-understanding/includes/rest-api/get-project-details.md b/articles/cognitive-services/language-service/conversational-language-understanding/includes/rest-api/get-project-details.md index fd6e2c47da6e..6fdf64effa2b 100644 --- a/articles/cognitive-services/language-service/conversational-language-understanding/includes/rest-api/get-project-details.md +++ b/articles/cognitive-services/language-service/conversational-language-understanding/includes/rest-api/get-project-details.md @@ -12,14 +12,14 @@ ms.author: aahi Use the following **GET** request to get your project details. You can use the URL you received from the previous step, or replace the placeholder values below with your own values. ```rest -{ENDPOINT}/language/analyze-conversations/projects/{PROJECT-NAME}?api-version={API-VERSION} +{ENDPOINT}/language/authoring/analyze-conversations/projects/{PROJECT-NAME}?api-version={API-VERSION} ``` |Placeholder |Value | Example | |---------|---------|---------| |`{ENDPOINT}` | The endpoint for authenticating your API request. | `https://.cognitiveservices.azure.com` | |`{PROJECT-NAME}` | The name for your project. This value is case-sensitive. | `myProject` | -|`{API-VERSION}` | The version of the API you are calling. The value referenced here is for the latest released [model version](../../../concepts/model-lifecycle.md#choose-the-model-version-used-on-your-data). | `2022-03-01-preview` | +|`{API-VERSION}` | The version of the API you are calling. The value referenced here is for the latest released [model version](../../../concepts/model-lifecycle.md#choose-the-model-version-used-on-your-data). | `2022-05-01` | #### Headers @@ -32,16 +32,15 @@ Use the following header to authenticate your request. 
#### Response body ```json - { - "createdDateTime": "2021-10-19T23:24:41.572Z", - "lastModifiedDateTime": "2021-10-19T23:24:41.572Z", - "lastTrainedDateTime": "2021-10-19T23:24:41.572Z", - "lastDeployedDateTime": "2021-10-19T23:24:41.572Z", - "type": "conversation", - "name": "myProject", - "multiLingual": true, - "description": "string", - "language": "en-us", - "settings": {} - } +{ + "createdDateTime": "2022-04-18T13:53:03Z", + "lastModifiedDateTime": "2022-04-18T13:53:03Z", + "lastTrainedDateTime": "2022-04-18T14:14:28Z", + "lastDeployedDateTime": "2022-04-18T14:49:01Z", + "projectKind": "Conversation", + "projectName": "{PROJECT-NAME}", + "multilingual": true, + "description": "This is a sample conversation project.", + "language": "{LANGUAGE-CODE}" +} ``` diff --git a/articles/cognitive-services/language-service/conversational-language-understanding/includes/rest-api/get-training-status.md b/articles/cognitive-services/language-service/conversational-language-understanding/includes/rest-api/get-training-status.md index 062e0ef42ae4..094f9bf18ef5 100644 --- a/articles/cognitive-services/language-service/conversational-language-understanding/includes/rest-api/get-training-status.md +++ b/articles/cognitive-services/language-service/conversational-language-understanding/includes/rest-api/get-training-status.md @@ -14,7 +14,7 @@ Use the following **GET** request to get the status of your model's training pro ### Request URL ```rest -{ENDPOINT}/language/analyze-conversations/projects/{PROJECT-NAME}/train/jobs/{JOB-ID}?api-version={API-VERSION} +{ENDPOINT}/language/authoring/analyze-conversations/projects/{PROJECT-NAME}/train/jobs/{JOB-ID}?api-version={API-VERSION} ``` |Placeholder |Value | Example | @@ -22,7 +22,7 @@ Use the following **GET** request to get the status of your model's training pro |`{YOUR-ENDPOINT}` | The endpoint for authenticating your API request. | `https://.cognitiveservices.azure.com` | |`{PROJECT-NAME}` | The name for your project. This value is case-sensitive. | `EmailApp` | |`{JOB-ID}` | The ID for locating your model's training status. This is in the `location` header value you received when submitted your training job. | `xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxxx` | -|`{API-VERSION}` | The version of the API you are calling. The value referenced here is for the latest version released. Learn more about other available [API versions](../../../concepts/model-lifecycle.md#choose-the-model-version-used-on-your-data) | `2022-03-01-preview` | +|`{API-VERSION}` | The version of the API you are calling. The value referenced here is for the latest version released. Learn more about other available [API versions](../../../concepts/model-lifecycle.md#choose-the-model-version-used-on-your-data) | `2022-05-01` | ### Headers @@ -43,18 +43,21 @@ Once you send the request, you will get the following response. 
Keep polling thi "modelLabel": "{MODEL-LABEL}", "trainingConfigVersion": "{TRAINING-CONFIG-VERSION}", "trainingMode": "{TRAINING-MODE}", + "estimatedEndDateTime": "2022-04-18T15:47:58.8190649Z", "trainingStatus": { - "percentComplete": 2, - "startDateTime": "{START-TIME}", - "status": "{STATUS}" + "percentComplete": 3, + "startDateTime": "2022-04-18T15:45:06.8190649Z", + "status": "running" }, - "evaluationStatus": { "percentComplete": 0, "status": "notStarted" }, - "estimatedEndDateTime": "{ESTIMATED-END-TIME}" + "evaluationStatus": { + "percentComplete": 0, + "status": "notStarted" + } }, - "jobId": "{JOB-ID}", - "createdDateTime": "{CREATED-TIME}", - "lastUpdatedDateTime": "{UPDATED-TIME}", - "expirationDateTime": "{EXPIRATION-TIME}", + "jobId": "xxxxx-xxxxx-xxxx-xxxxx-xxxx", + "createdDateTime": "2022-04-18T15:44:44Z", + "lastUpdatedDateTime": "2022-04-18T15:45:48Z", + "expirationDateTime": "2022-04-25T15:44:44Z", "status": "running" } ``` @@ -72,5 +75,3 @@ Once you send the request, you will get the following response. Keep polling thi |`lastUpdatedDateTime`| Training job last updated date and time | `2022-04-14T10:23:45Z`| |`expirationDateTime`| Training job expiration date and time | `2022-04-14T10:22:42Z`| - - diff --git a/articles/cognitive-services/language-service/conversational-language-understanding/includes/rest-api/import-luis-project.md b/articles/cognitive-services/language-service/conversational-language-understanding/includes/rest-api/import-luis-project.md index a64d6f464944..4489dc9e13f5 100644 --- a/articles/cognitive-services/language-service/conversational-language-understanding/includes/rest-api/import-luis-project.md +++ b/articles/cognitive-services/language-service/conversational-language-understanding/includes/rest-api/import-luis-project.md @@ -24,7 +24,7 @@ Use the following URL when creating your API request. Replace the placeholder va |---------|---------|---------| |`{ENDPOINT}` | The endpoint for authenticating your API request. | `https://.cognitiveservices.azure.com` | |`{PROJECT-NAME}` | The name for your project. This value is case-sensitive. | `myProject` | -|`{API-VERSION}` | The version of the API you are calling. The value referenced here is for the latest released [model version](../../../concepts/model-lifecycle.md#choose-the-model-version-used-on-your-data) released. | `2022-03-01-preview` | +|`{API-VERSION}` | The version of the API you are calling. The value referenced here is for the latest released [model version](../../../concepts/model-lifecycle.md#choose-the-model-version-used-on-your-data) released. | `2022-05-01` | ### Headers @@ -41,7 +41,7 @@ Use the following sample JSON as your body. ```json { "api-version":"{API-VERSION}" , - "stringIndexType": "Utf16CodeUnit", + "stringIndexType": "Utf16CodeUnit", "metadata": { "projectKind": "conversation", "settings": { @@ -53,6 +53,7 @@ Use the following sample JSON as your body. "language": "{LANGUAGE-CODE}" }, "assets": { + "projectKind": "luis", "intents": [ { "category": "Read" @@ -95,7 +96,7 @@ Use the following sample JSON as your body. |Key |Placeholder |Value | Example | |---------|---------|----------|--| -| `api-version` | `{API-VERSION}` | The version of the API you are calling. The version used here must be the same API version in the URL. | `2022-03-01-preview` | +| `api-version` | `{API-VERSION}` | The version of the API you are calling. The version used here must be the same API version in the URL. | `2022-05-01` | | `projectName` | `{PROJECT-NAME}` | The name of your project. 
This value is case-sensitive. | `EmailApp` | | `language` | `{LANGUAGE-CODE}` | A string specifying the language code for the utterances used in your project. If your project is a multilingual project, choose the [language code](../../language-support.md) of the majority of the utterances. |`en-us`| | `multilingual` | `true`| A boolean value that enables you to have documents in multiple languages in your dataset and when your model is deployed you can query the model in any [supported language](../../language-support.md); not necessarily a language included in your training documents. | `true`| diff --git a/articles/cognitive-services/language-service/conversational-language-understanding/includes/rest-api/import-project.md b/articles/cognitive-services/language-service/conversational-language-understanding/includes/rest-api/import-project.md index b2a2980b9c2b..d14cb442a80a 100644 --- a/articles/cognitive-services/language-service/conversational-language-understanding/includes/rest-api/import-project.md +++ b/articles/cognitive-services/language-service/conversational-language-understanding/includes/rest-api/import-project.md @@ -24,7 +24,7 @@ Use the following URL when creating your API request. Replace the placeholder va |---------|---------|---------| |`{ENDPOINT}` | The endpoint for authenticating your API request. | `https://.cognitiveservices.azure.com` | |`{PROJECT-NAME}` | The name for your project. This value is case-sensitive. | `myProject` | -|`{API-VERSION}` | The version of the API you're calling. The value referenced here is for the latest released [model version](../../../concepts/model-lifecycle.md#choose-the-model-version-used-on-your-data). | `2022-03-01-preview` | +|`{API-VERSION}` | The version of the API you're calling. The value referenced here is for the latest released [model version](../../../concepts/model-lifecycle.md#choose-the-model-version-used-on-your-data). | `2022-05-01` | ### Headers @@ -40,51 +40,51 @@ Use the following sample JSON as your body. ```json { - "api-version":"{API-VERSION}" , - "stringIndexType": "Utf16CodeUnit", - "metadata": { - "projectKind": "conversation", - "settings": { - "confidenceThreshold": 0.7 - }, - "projectName": "{PROJECT-NAME}", - "multilingual": true, - "description": "Trying out CLU", - "language": "{LANGUAGE-CODE}" + "projectFileVersion": "{API-VERSION}", + "stringIndexType": "Utf16CodeUnit", + "metadata": { + "projectKind": "Conversation", + "settings": { + "confidenceThreshold": 0.7 }, + "projectName": "{PROJECT-NAME}", + "multilingual": true, + "description": "Trying out CLU", + "language": "{LANGUAGE-CODE}" + }, "assets": { + "projectKind": "Conversation", "intents": [ { - "category": "Read" + "category": "intent1" }, { - "category": "Delete" + "category": "intent2" } ], "entities": [ { - "category": "Sender" + "category": "entity1" } ], "utterances": [ { - "text": "Open Blake's email", - "language": "{LANGUAGE-CODE}", + "text": "text1", "dataset": "{DATASET}", - "intent": "Read", + "intent": "intent1", "entities": [ { - "category": "Sender", + "category": "entity1", "offset": 5, "length": 5 } ] }, { - "text": "Delete last email", + "text": "text2", "language": "{LANGUAGE-CODE}", "dataset": "{DATASET}", - "intent": "Attach", + "intent": "intent2", "entities": [] } ] @@ -95,7 +95,7 @@ Use the following sample JSON as your body. |Key |Placeholder |Value | Example | |---------|---------|----------|--| -| api-version | `{API-VERSION}` | The version of the API you're calling. 
The version used here must be the same API model version in the URL. See the [model version](../../../concepts/model-lifecycle.md#choose-the-model-version-used-on-your-data) article to learn more. | `2022-03-01-preview` | +| api-version | `{API-VERSION}` | The version of the API you're calling. The version used here must be the same API model version in the URL. See the [model version](../../../concepts/model-lifecycle.md#choose-the-model-version-used-on-your-data) article to learn more. | `2022-05-01` | | `projectName` | `{PROJECT-NAME}` | The name of your project. This value is case-sensitive. | `EmailApp` | | `language` | `{LANGUAGE-CODE}` | A string specifying the language code for the utterances used in your project. If your project is a multilingual project, choose the [language code](../../language-support.md) of the majority of the utterances. |`en-us`| | `multilingual` | `true`| A boolean value that enables you to have documents in multiple languages in your dataset. When your model is deployed, you can query the model in any [supported language](../../language-support.md#multi-lingual-option). This includes languages that aren't included in your training documents. | `true`| diff --git a/articles/cognitive-services/language-service/conversational-language-understanding/includes/rest-api/model-evaluation.md b/articles/cognitive-services/language-service/conversational-language-understanding/includes/rest-api/model-evaluation.md index a60d0dbe8309..cb0f248fad3e 100644 --- a/articles/cognitive-services/language-service/conversational-language-understanding/includes/rest-api/model-evaluation.md +++ b/articles/cognitive-services/language-service/conversational-language-understanding/includes/rest-api/model-evaluation.md @@ -9,15 +9,13 @@ ms.date: 05/16/2022 ms.author: aahi --- - - Create a **GET** request using the following URL, headers, and JSON body to get the trained model evaluation summary. ### Request URL ```rest -{ENDPOINT}/language/analyze-conversations/projects/{projectName}/models/{trainedModelLabel}/evaluation/summary-result?api-version={API-VERSION} +{ENDPOINT}/language/authoring/analyze-conversations/projects/{projectName}/models/{trainedModelLabel}/evaluation/summary-result?api-version={API-VERSION} ``` |Placeholder |Value | Example | @@ -25,7 +23,7 @@ Create a **GET** request using the following URL, headers, and JSON body to get |`{ENDPOINT}` | The endpoint for authenticating your API request. | `https://.cognitiveservices.azure.com` | |`{PROJECT-NAME}` | The name for your project. This value is case-sensitive. | `myProject` | |`{trainedModelLabel}` | The name for your trained model. This value is case-sensitive. | `Model1` | -|`{API-VERSION}` | The version of the API you are calling. The value referenced here is for the latest released [model version](../../../concepts/model-lifecycle.md#choose-the-model-version-used-on-your-data). | `2022-03-01-preview` | +|`{API-VERSION}` | The version of the API you are calling. The value referenced here is for the latest released [model version](../../../concepts/model-lifecycle.md#choose-the-model-version-used-on-your-data). | `2022-05-01` | ### Headers @@ -35,7 +33,7 @@ Use the following header to authenticate your request. |Key|Value| |--|--| |`Ocp-Apim-Subscription-Key`| The key to your resource. 
Used for authenticating your API requests.| -|`Content-Type` | application/json | + ### Response Body diff --git a/articles/cognitive-services/language-service/conversational-language-understanding/includes/rest-api/project-details.md b/articles/cognitive-services/language-service/conversational-language-understanding/includes/rest-api/project-details.md index 37c22cef2ee7..2c0bba856388 100644 --- a/articles/cognitive-services/language-service/conversational-language-understanding/includes/rest-api/project-details.md +++ b/articles/cognitive-services/language-service/conversational-language-understanding/includes/rest-api/project-details.md @@ -21,7 +21,7 @@ To get your project details, submit a **GET** request using the following URL an |---------|---------|---------| |`{ENDPOINT}` | The endpoint for authenticating your API request. | `https://.cognitiveservices.azure.com` | |`{PROJECT-NAME}` | The name for your project. This value is case-sensitive. | `myProject` | -|`{API-VERSION}` | The version of the API you are calling. The value referenced here is for the latest released [model version](../../../concepts/model-lifecycle.md#choose-the-model-version-used-on-your-data). | `2022-03-01-preview` | +|`{API-VERSION}` | The version of the API you are calling. The value referenced here is for the latest released [model version](../../../concepts/model-lifecycle.md#choose-the-model-version-used-on-your-data). | `2022-05-01` | ### Headers @@ -37,20 +37,16 @@ Once you send the request, you will get the following response. ```json { - "createdDateTime": "{CREATED-TIME}", - "lastModifiedDateTime": "{CREATED-TIME}", - "lastTrainedDateTime": "{CREATED-TIME}", - "lastDeployedDateTime": "{CREATED-TIME}", - "projectKind": "conversation", - "settings": { - "confidenceThreshold": 0 - }, + "createdDateTime": "2022-04-18T13:53:03Z", + "lastModifiedDateTime": "2022-04-18T13:53:03Z", + "lastTrainedDateTime": "2022-04-18T14:14:28Z", + "lastDeployedDateTime": "2022-04-18T14:49:01Z", + "projectKind": "Conversation", "projectName": "{PROJECT-NAME}", "multilingual": true, - "description": "string", + "description": "This is a sample conversation project.", "language": "{LANGUAGE-CODE}" } - ``` -Once you send your API request, you will receive a `202` response indicating success and JSON response body with your project details. +Once you send your API request, you will receive a `200` response indicating success and JSON response body with your project details. diff --git a/articles/cognitive-services/language-service/conversational-language-understanding/includes/rest-api/query-model.md b/articles/cognitive-services/language-service/conversational-language-understanding/includes/rest-api/query-model.md index 4be4d2bbf46f..9aeb8bb5e166 100644 --- a/articles/cognitive-services/language-service/conversational-language-understanding/includes/rest-api/query-model.md +++ b/articles/cognitive-services/language-service/conversational-language-understanding/includes/rest-api/query-model.md @@ -21,7 +21,7 @@ Create a **POST** request using the following URL, headers, and JSON body to sta |Placeholder |Value | Example | |---------|---------|---------| |`{ENDPOINT}` | The endpoint for authenticating your API request. | `https://.cognitiveservices.azure.com` | -|`{API-VERSION}` | The version of the API you are calling. The value referenced here is for the latest released [model version](../../../concepts/model-lifecycle.md#choose-the-model-version-used-on-your-data). 
| `2022-03-01-preview` | +|`{API-VERSION}` | The version of the API you are calling. The value referenced here is for the latest released [model version](../../../concepts/model-lifecycle.md#choose-the-model-version-used-on-your-data). | `2022-05-01` | ### Headers @@ -36,20 +36,19 @@ Use the following header to authenticate your request. ```json { - "kind": "CustomConversation", - "analysisInput": { - "conversationItem": { - "participantId":"{JOB-NAME}", - "id":"{JOB-NAME}", - "modality":"text", - "text":"{TEST-UTTERANCE}", - "language":"{LANGUAGE-CODE}", - } - }, - "parameters": { - "projectName": "{PROJECT-NAME}", - "deploymentName": "{DEPLOYMENT-NAME}" - } + "kind": "Conversation", + "analysisInput": { + "conversationItem": { + "id": "1", + "participantId": "1", + "text": "Text 1" + } + }, + "parameters": { + "projectName": "{PROJECT-NAME}", + "deploymentName": "{DEPLOYMENT-NAME}", + "stringIndexType": "TextElement_V8" + } } ``` @@ -59,8 +58,6 @@ Use the following header to authenticate your request. | `participantId` | `{JOB-NAME}` | | `"MyJobName` | | `id` | `{JOB-NAME}` | | `"MyJobName` | | `text` | `{TEST-UTTERANCE}` | The utterance that you want to predict its intent and extract entities from. | `"Read Matt's email` | -| `language` | `{LANGUAGE-CODE}` | A string specifying the language code for the utterance submitted. Learn more about supported language codes [here](../../language-support.md) |`en-us`| -| `id` | `{JOB-NAME}` | | `"MyJobName` | | `projectName` | `{PROJECT-NAME}` | The name of your project. This value is case-sensitive. | `myProject` | | `deploymentName` | `{DEPLOYMENT-NAME}` | The name of your deployment. This value is case-sensitive. | `staging` | @@ -70,33 +67,37 @@ Once you send the request, you will get the following response for the predictio ```json { - "kind": "CustomConversationResult", - "results": { - "query": "Read Matt's email", - "prediction": { - "projectKind": "conversation", - "topIntent": "Read", - "intents": [ - { - "category": "Read", - "confidenceScore": 0.9403077 - }, - { - "category": "Delete", - "confidenceScore": 0.016843017 - }, - ], - "entities": [ - { - "category": "SenderName", - "text": "Matt", - "offset": 5, - "length": 4, - "confidenceScore": 1 - } - ] + "kind": "ConversationResult", + "result": { + "query": "Text1", + "prediction": { + "topIntent": "intent1", + "projectKind": "Conversation", + "intents": [ + { + "category": "intent1", + "confidenceScore": 1 + }, + { + "category": "intent2", + "confidenceScore": 0 + }, + { + "category": "intent3", + "confidenceScore": 0 + } + ], + "entities": [ + { + "category": "entity1", + "text": "text1", + "offset": 29, + "length": 12, + "confidenceScore": 1 } + ] } + } } ``` diff --git a/articles/cognitive-services/language-service/conversational-language-understanding/includes/rest-api/swap-deployment.md b/articles/cognitive-services/language-service/conversational-language-understanding/includes/rest-api/swap-deployment.md index 3f1920cd1cea..05b799eee97c 100644 --- a/articles/cognitive-services/language-service/conversational-language-understanding/includes/rest-api/swap-deployment.md +++ b/articles/cognitive-services/language-service/conversational-language-understanding/includes/rest-api/swap-deployment.md @@ -15,14 +15,14 @@ Create a **POST** request using the following URL, headers, and JSON body to sta ### Request URL ```rest -{ENDPOINT}/language/analyze-conversations/projects/{PROJECT-NAME}/deployments:swap?api-version={API-VERSION}
+{ENDPOINT}/language/authoring/analyze-conversations/projects/{PROJECT-NAME}/deployments:swap?api-version={API-VERSION} ``` |Placeholder |Value | Example | |---------|---------|---------| |`{ENDPOINT}` | The endpoint for authenticating your API request. | `https://.cognitiveservices.azure.com` | |`{PROJECT-NAME}` | The name for your project. This value is case-sensitive. | `myProject` | -|`{API-VERSION}` | The version of the API you are calling. The value referenced here is for the latest released [model version](../../../concepts/model-lifecycle.md#choose-the-model-version-used-on-your-data). | `2022-03-01-preview` | +|`{API-VERSION}` | The version of the API you are calling. The value referenced here is for the latest released [model version](../../../concepts/model-lifecycle.md#choose-the-model-version-used-on-your-data). | `2022-05-01` | ### Headers @@ -32,7 +32,6 @@ Use the following header to authenticate your request. |Key|Value| |--|--| |`Ocp-Apim-Subscription-Key`| The key to your resource. Used for authenticating your API requests.| -|`Content-Type` | application/json | ### Request Body @@ -44,10 +43,9 @@ Use the following header to authenticate your request. ``` -|Key| value| Example| -|--|--|--| -|`firstDeploymentName` | The name for your first deployment. This value is case-sensitive. | `production` | -|`secondDeploymentName` | The name for your second deployment. This value is case-sensitive. | `staging` | - +|Key|Placeholder| Value| Example| +|--|--|--|--| +|firstDeploymentName |`{FIRST-DEPLOYMENT-NAME}`| The name for your first deployment. This value is case-sensitive. | `production` | +|secondDeploymentName | `{SECOND-DEPLOYMENT-NAME}`|The name for your second deployment. This value is case-sensitive. | `staging` | Once you send your API request, you will receive a `202` response indicating success. diff --git a/articles/cognitive-services/language-service/conversational-language-understanding/includes/rest-api/train-model.md b/articles/cognitive-services/language-service/conversational-language-understanding/includes/rest-api/train-model.md index bd8462fba7c0..a47c8d085244 100644 --- a/articles/cognitive-services/language-service/conversational-language-understanding/includes/rest-api/train-model.md +++ b/articles/cognitive-services/language-service/conversational-language-understanding/includes/rest-api/train-model.md @@ -16,14 +16,14 @@ Create a **POST** request using the following URL, headers, and JSON body to sub Use the following URL when creating your API request. Replace the placeholder values below with your own values. ```rest -{ENDPOINT}/language/analyze-conversations/projects/{PROJECT-NAME}/:train?api-version={API-VERSION} +{ENDPOINT}/language/authoring/analyze-conversations/projects/{PROJECT-NAME}/:train?api-version={API-VERSION} ``` |Placeholder |Value | Example | |---------|---------|---------| |`{ENDPOINT}` | The endpoint for authenticating your API request. | `https://.cognitiveservices.azure.com` | |`{PROJECT-NAME}` | The name for your project. This value is case-sensitive. | `EmailApp` | -|`{API-VERSION}` | The version of the API you are calling. The value referenced here is for the latest released [model version](../../../concepts/model-lifecycle.md#choose-the-model-version-used-on-your-data) released. | `2022-03-01-preview` | +|`{API-VERSION}` | The version of the API you are calling. The value referenced here is for the latest released [model version](../../../concepts/model-lifecycle.md#choose-the-model-version-used-on-your-data) released. 
| `2022-05-01` | ### Headers @@ -32,7 +32,7 @@ Use the following header to authenticate your request. |Key|Value| |--|--| |`Ocp-Apim-Subscription-Key`| The key to your resource. Used for authenticating your API requests.| -|`Content-Type` | application/json | + ### Request body @@ -41,12 +41,12 @@ Use the following object in your request. The model will be named `MyModel` once ```json { "modelLabel": "{MODEL-NAME}", - "trainingConfigVersion": "{CONFIG-VERSION}", "trainingMode": "{TRAINING-MODE}", + "trainingConfigVersion": "{CONFIG-VERSION}", "evaluationOptions": { "kind": "percentage", - "trainingSplitPercentage": 0, - "testingSplitPercentage": 0 + "testingSplitPercentage": 20, + "trainingSplitPercentage": 80 } } ``` @@ -65,6 +65,6 @@ Use the following object in your request. The model will be named `MyModel` once Once you send your API request, you will receive a `202` response indicating success. In the response headers, extract the `operation-location` value. It will be formatted like this: ```rest -{ENDPOINT}/language/analyze-conversations/projects/{PROJECT-NAME}/train/jobs/{JOB-ID}?api-version={API-VERSION} +{ENDPOINT}/language/authoring/analyze-conversations/projects/{PROJECT-NAME}/train/jobs/{JOB-ID}?api-version={API-VERSION} ``` You can use this URL to get the training job status. diff --git a/articles/cognitive-services/language-service/conversational-language-understanding/language-support.md b/articles/cognitive-services/language-service/conversational-language-understanding/language-support.md index 35309f037c2b..d773cc83797d 100644 --- a/articles/cognitive-services/language-service/conversational-language-understanding/language-support.md +++ b/articles/cognitive-services/language-service/conversational-language-understanding/language-support.md @@ -19,6 +19,9 @@ Use this article to learn about the languages currently supported by CLU feature ## Multi-lingual option +> [!TIP] +> See [How to train a model](how-to/train-model.md#training-modes) for information on which training mode you should use for multilingual projects. + With conversational language understanding, you can train a model in one language and use to predict intents and entities from utterances in another language. This feature is powerful because it helps save time and effort. Instead of building separate projects for every language, you can handle multi-lingual dataset in one project. Your dataset doesn't have to be entirely in the same language but you should enable the multi-lingual option for your project while creating or later in project settings. If you notice your model performing poorly in certain languages during the evaluation process, consider adding more data in these languages to your training set. You can train your project entirely with English utterances, and query it in: French, German, Mandarin, Japanese, Korean, and others. Conversational language understanding makes it easy for you to scale your projects to multiple languages by using multilingual technology to train your models. 
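A minimal C# sketch of the updated train-and-poll flow, assuming the `/language/authoring/analyze-conversations/` route and the `2022-05-01` API version shown in the hunks above; the endpoint, resource key, project name, model label, and the `standard` training mode below are illustrative placeholders rather than values taken from these files.

```csharp
// Sketch only: submit a CLU training job, then poll the job URL returned in the
// operation-location header of the 202 response, as described in train-model.md above.
using System;
using System.Linq;
using System.Net.Http;
using System.Text;
using System.Threading.Tasks;

class CluAuthoringSketch
{
    private const string Endpoint = "https://<your-resource>.cognitiveservices.azure.com"; // placeholder endpoint
    private const string ApiVersion = "2022-05-01";
    private const string ProjectName = "EmailApp"; // placeholder project name

    static async Task Main()
    {
        using var client = new HttpClient();
        client.DefaultRequestHeaders.Add("Ocp-Apim-Subscription-Key", "<your-resource-key>"); // placeholder key

        // POST {ENDPOINT}/language/authoring/analyze-conversations/projects/{PROJECT-NAME}/:train
        var trainUrl = $"{Endpoint}/language/authoring/analyze-conversations/projects/{ProjectName}/:train?api-version={ApiVersion}";

        // Body mirrors the train-model sample above; trainingConfigVersion is omitted here,
        // and "standard" is assumed to correspond to the Standard training option.
        var body = new StringContent(
            @"{ ""modelLabel"": ""v1"", ""trainingMode"": ""standard"",
                ""evaluationOptions"": { ""kind"": ""percentage"", ""testingSplitPercentage"": 20, ""trainingSplitPercentage"": 80 } }",
            Encoding.UTF8, "application/json");

        var response = await client.PostAsync(trainUrl, body);

        // A 202 response carries the training job URL in the operation-location header.
        if (response.Headers.TryGetValues("operation-location", out var locations))
        {
            var jobUrl = locations.First();
            var status = await client.GetStringAsync(jobUrl); // poll until "status" is "succeeded"
            Console.WriteLine(status);
        }
    }
}
```

The same 202-plus-`operation-location` polling pattern applies to the deploy, export, and import jobs covered by the other hunks above.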
diff --git a/articles/cognitive-services/language-service/conversational-language-understanding/media/prediction-url.png b/articles/cognitive-services/language-service/conversational-language-understanding/media/prediction-url.png index 41a9ed122d4d..2e388bf621a8 100644 Binary files a/articles/cognitive-services/language-service/conversational-language-understanding/media/prediction-url.png and b/articles/cognitive-services/language-service/conversational-language-understanding/media/prediction-url.png differ diff --git a/articles/cognitive-services/language-service/conversational-language-understanding/media/select-custom-clu.png b/articles/cognitive-services/language-service/conversational-language-understanding/media/select-custom-clu.png index abcc27ea11be..28c97fea76da 100644 Binary files a/articles/cognitive-services/language-service/conversational-language-understanding/media/select-custom-clu.png and b/articles/cognitive-services/language-service/conversational-language-understanding/media/select-custom-clu.png differ diff --git a/articles/cognitive-services/language-service/conversational-language-understanding/media/train-model-tutorial.png b/articles/cognitive-services/language-service/conversational-language-understanding/media/train-model-tutorial.png deleted file mode 100644 index 2bd94dacdb8e..000000000000 Binary files a/articles/cognitive-services/language-service/conversational-language-understanding/media/train-model-tutorial.png and /dev/null differ diff --git a/articles/cognitive-services/language-service/conversational-language-understanding/overview.md b/articles/cognitive-services/language-service/conversational-language-understanding/overview.md index e605fbbbe099..d1b3884d34d5 100644 --- a/articles/cognitive-services/language-service/conversational-language-understanding/overview.md +++ b/articles/cognitive-services/language-service/conversational-language-understanding/overview.md @@ -17,7 +17,7 @@ ms.custom: language-service-clu, ignite-fall-2021 Conversational language understanding is one of the custom features offered by [Azure Cognitive Service for Language](../overview.md). It is a cloud-based API service that applies machine-learning intelligence to enable you to build natural language understanding component to be used in an end-to-end conversational application. -Conversational language understanding (CLU) enables users to build custom natural language understanding models to predict the overall intention of an incoming utterance and extract important information from it. CLU only provides the intelligence to understand the input text for the client application and doesn't perform any actions. By creating a CLU project, developers can iteratively tag utterances, train and evaluate model performance before making it available for consumption. The quality of the tagged data greatly impacts model performance. To simplify building and customizing your model, the service offers a custom web portal that can be accessed through the [Language studio](https://aka.ms/languageStudio). You can easily get started with the service by following the steps in this [quickstart](quickstart.md). +Conversational language understanding (CLU) enables users to build custom natural language understanding models to predict the overall intention of an incoming utterance and extract important information from it. CLU only provides the intelligence to understand the input text for the client application and doesn't perform any actions. 
By creating a CLU project, developers can iteratively label utterances, train and evaluate model performance before making it available for consumption. The quality of the labeled data greatly impacts model performance. To simplify building and customizing your model, the service offers a custom web portal that can be accessed through the [Language studio](https://aka.ms/languageStudio). You can easily get started with the service by following the steps in this [quickstart](quickstart.md). This documentation contains the following article types: @@ -57,9 +57,9 @@ Follow these steps to get the most out of your model: 1. **Build schema**: Know your data and define the actions and relevant information that needs to be recognized from user's input utterances. In this step you create the [intents](glossary.md#intent) that you want to assign to user's utterances, and the relevant [entities](glossary.md#entity) you want extracted. -2. **Tag data**: The quality of data tagging is a key factor in determining model performance. +2. **Label data**: The quality of data labeling is a key factor in determining model performance. -3. **Train model**: Your model starts learning from your tagged data. +3. **Train model**: Your model starts learning from your labeled data. 4. **View model evaluation details**: View the evaluation details for your model to determine how well it performs when introduced to new data. diff --git a/articles/cognitive-services/language-service/conversational-language-understanding/tutorials/bot-framework.md b/articles/cognitive-services/language-service/conversational-language-understanding/tutorials/bot-framework.md index bafb6f422a29..abb7198593cf 100644 --- a/articles/cognitive-services/language-service/conversational-language-understanding/tutorials/bot-framework.md +++ b/articles/cognitive-services/language-service/conversational-language-understanding/tutorials/bot-framework.md @@ -9,7 +9,7 @@ ms.reviewer: cahann, hazemelh ms.service: cognitive-services ms.subservice: language-service ms.topic: tutorial -ms.date: 05/17/2022 +ms.date: 05/25/2022 --- # Integrate conversational language understanding with Bot Framework @@ -23,23 +23,22 @@ This tutorial will explain how to integrate your own conversational language und - Create a [Language resource](https://portal.azure.com/#create/Microsoft.CognitiveServicesTextAnalytics) in the Azure portal to get your key and endpoint. After it deploys, select **Go to resource**. - You will need the key and endpoint from the resource you create to connect your bot to the API. You'll paste your key and endpoint into the code below later in the tutorial. -- Download the **Core Bot** for CLU [sample in C#](https://aka.ms/clu-botframework-overview). - - Clone the entire Bot Framework Samples repository to get access to this sample project. - +- Download the **CoreBotWithCLU** [sample](https://aka.ms/clu-botframework-overview). + - Clone the entire samples repository to get access to this solution. ## Import a project in conversational language understanding -1. Copy the [FlightBooking.json](https://aka.ms/clu-botframework-json) file in the **Core Bot** for CLU sample. +1. Download the [FlightBooking.json](https://aka.ms/clu-botframework-json) file in the **Core Bot with CLU** sample, in the _Cognitive Models_ folder. 2. Sign into the [Language Studio](https://language.cognitive.azure.com/) and select your Language resource. 3. 
Navigate to [Conversational Language Understanding](https://language.cognitive.azure.com/clu/projects) and click on the service. This will route you the projects page. Click the Import button next to the Create New Project button. Import the FlightBooking.json file with the project name as **FlightBooking**. This will automatically import the CLU project with all the intents, entities, and utterances. :::image type="content" source="../media/import.png" alt-text="A screenshot showing where to import a J son file." lightbox="../media/import.png"::: -4. Once the project is loaded, click on **Training** on the left. Press on Start a training job, provide the model name **v1** and press Train. All other settings such as **Standard Training** and the evaluation settings can be left as is. +4. Once the project is loaded, click on **Training jobs** on the left. Press on Start a training job, provide the model name **v1** and press Train. All other settings such as **Standard Training** and the evaluation settings can be left as is. - :::image type="content" source="../media/train-model-tutorial.png" alt-text="A screenshot of the training page in C L U." lightbox="../media/train-model-tutorial.png"::: + :::image type="content" source="../media/train-model.png" alt-text="A screenshot of the training page in C L U." lightbox="../media/train-model.png"::: -5. Once training is complete, click to **Deployments** on the left. Click on Add Deployment and create a new deployment with the name **Testing**, and assign model **v1** to the deployment. +5. Once training is complete, click to **Deploying a model** on the left. Click on Add Deployment and create a new deployment with the name **Testing**, and assign model **v1** to the deployment. :::image type="content" source="../media/deploy-model-tutorial.png" alt-text="A screenshot of the deployment page within the deploy model screen in C L U." lightbox="../media/deploy-model-tutorial.png"::: @@ -52,7 +51,7 @@ In the **Core Bot** sample, update your [appsettings.json](https://aka.ms/clu-bo - The _CluProjectName_ is **FlightBooking**. - The _CluDeploymentName_ is **Testing** - The _CluAPIKey_ can be either of the keys in the **Keys and Endpoint** section for your Language resource in the [Azure portal](https://portal.azure.com). You can also copy your key from the Project Settings tab in CLU. -- The _CluAPIHostName_ is the endpoint found in the **Keys and Endpoint** section for your Language resource in the Azure portal. Note the format should be ```.cognitiveservices.azure.com``` without `https://` +- The _CluAPIHostName_ is the endpoint found in the **Keys and Endpoint** section for your Language resource in the Azure portal. Note the format should be ```.cognitiveservices.azure.com``` without `https://`. ```json { @@ -67,7 +66,7 @@ In the **Core Bot** sample, update your [appsettings.json](https://aka.ms/clu-bo ## Identify integration points -In the Core Bot sample, under the CLU folder, you can check out the **FlightBookingRecognizer.cs** file. Here is where the CLU API call to the deployed endpoint is made to retrieve the CLU prediction for intents and entities. +In the Core Bot sample, you can check out the **FlightBookingRecognizer.cs** file. Here is where the CLU API call to the deployed endpoint is made to retrieve the CLU prediction for intents and entities. 
```csharp public FlightBookingRecognizer(IConfiguration configuration) @@ -91,7 +90,7 @@ In the Core Bot sample, under the CLU folder, you can check out the **FlightBook ``` -Under the folder Dialogs folder, find the **MainDialog** which uses the following to make a CLU prediction. +Under the Dialogs folder, find the **MainDialog** which uses the following to make a CLU prediction. ```csharp var cluResult = await _cluRecognizer.RecognizeAsync(stepContext.Context, cancellationToken); @@ -136,7 +135,7 @@ Run the sample locally on your machine **OR** run the bot from a terminal or fro ### Run the bot from a terminal -From a terminal, navigate to `samples/csharp_dotnetcore/90.core-bot-with-clu/90.core-bot-with-clu` +From a terminal, navigate to the `cognitive-service-language-samples/CoreBotWithCLU` folder. Then run the following command @@ -149,8 +148,8 @@ dotnet run 1. Launch Visual Studio 1. From the top navigation menu, select **File**, **Open**, then **Project/Solution** -1. Navigate to the `samples/csharp_dotnetcore/90.core-bot-with-clu/90.core-bot-with-clu` folder -1. Select the `CoreBotWithCLU.csproj` file +1. Navigate to the `cognitive-service-language-samples/CoreBotWithCLU` folder +1. Select the `CoreBotCLU.csproj` file 1. Press `F5` to run the project diff --git a/articles/cognitive-services/language-service/custom-named-entity-recognition/includes/resource-creation-azure-portal.md b/articles/cognitive-services/language-service/custom-named-entity-recognition/includes/resource-creation-azure-portal.md index 38cb4538e218..7e266601cf1a 100644 --- a/articles/cognitive-services/language-service/custom-named-entity-recognition/includes/resource-creation-azure-portal.md +++ b/articles/cognitive-services/language-service/custom-named-entity-recognition/includes/resource-creation-azure-portal.md @@ -31,11 +31,11 @@ ms.author: aahi |Location | The [location](../service-limits.md#regional-availability) of your Language resource. | |Pricing tier | The [pricing tier](../service-limits.md#language-resource-limits) for your Language resource. | -7. In the **Custom text classification & custom named entity recognition** section, select an existing storage account or select **Create a new storage account**. These values are to help you get started, and not necessarily the [storage account values](/azure/storage/common/storage-account-overview) you’ll want to use in production environments. To avoid latency during building your project connect to storage accounts in the same region as your Language resource. +7. In the **Custom text classification & custom named entity recognition** section, select an existing storage account or select **Create a new storage account**. These values are to help you get started, and not necessarily the [storage account values](../../../../storage/common/storage-account-overview.md) you’ll want to use in production environments. To avoid latency during building your project connect to storage accounts in the same region as your Language resource. 
|Storage account value |Recommended value | |---------|---------| | Name | Any name | | Account kind| Storage (general purpose v1) | | Performance | Standard | - | Replication | Locally redundant storage (LRS) | + | Replication | Locally redundant storage (LRS) | \ No newline at end of file diff --git a/articles/cognitive-services/language-service/custom-named-entity-recognition/includes/resource-creation-powershell.md b/articles/cognitive-services/language-service/custom-named-entity-recognition/includes/resource-creation-powershell.md index dcca4887e338..7b331b128937 100644 --- a/articles/cognitive-services/language-service/custom-named-entity-recognition/includes/resource-creation-powershell.md +++ b/articles/cognitive-services/language-service/custom-named-entity-recognition/includes/resource-creation-powershell.md @@ -37,4 +37,4 @@ New-AzResourceGroupDeployment -Name ExampleDeployment -ResourceGroupName Example -TemplateParameterFile ``` -See the ARM template documentation for information on [deploying templates](/azure/azure-resource-manager/templates/deploy-powershell#parameter-files) and [parameter files](/azure/azure-resource-manager/templates/parameter-files#parameter-file). +See the ARM template documentation for information on [deploying templates](../../../../azure-resource-manager/templates/deploy-powershell.md#parameter-files) and [parameter files](../../../../azure-resource-manager/templates/parameter-files.md#parameter-file). \ No newline at end of file diff --git a/articles/cognitive-services/language-service/custom-named-entity-recognition/includes/rest-api/cancel-training.md b/articles/cognitive-services/language-service/custom-named-entity-recognition/includes/rest-api/cancel-training.md index 934f1b4eee82..455ae61beaaf 100644 --- a/articles/cognitive-services/language-service/custom-named-entity-recognition/includes/rest-api/cancel-training.md +++ b/articles/cognitive-services/language-service/custom-named-entity-recognition/includes/rest-api/cancel-training.md @@ -9,23 +9,22 @@ ms.date: 05/06/2022 ms.author: aahi --- - -Create a **POST** request using the following URL, headers, and JSON body to cancel a training job. +Create a **POST** request by using the following URL, headers, and JSON body to cancel a training job. ### Request URL Use the following URL when creating your API request. Replace the placeholder values below with your own values. ```rest -{ENDPOINT}/language/analyze-text/projects/{PROJECT-NAME}/train/jobs/{JOB-ID}/:cancel?api-version={API-VERSION} +{Endpoint}/language/authoring/analyze-text/projects/{PROJECT-NAME}/train/jobs/{JOB-ID}/:cancel?api-version={API-VERSION} ``` |Placeholder |Value | Example | |---------|---------|---------| |`{ENDPOINT}` | The endpoint for authenticating your API request. | `https://.cognitiveservices.azure.com` | |`{PROJECT-NAME}` | The name for your project. This value is case-sensitive. | `EmailApp` | -|`{JOB-ID} | This is the training job ID| |`XXXXX-XXXXX-XXXX-XX| -|`{API-VERSION}` | The version of the API you are calling. The value referenced here is for the latest released [model version](../../../concepts/model-lifecycle.md#choose-the-model-version-used-on-your-data) | `2022-03-01-preview` | +|`{JOB-ID}` | This value is the training job ID.| `XXXXX-XXXXX-XXXX-XX`| +|`{API-VERSION}` | The version of the API you're calling. The value referenced is for the latest released [model version](../../../concepts/model-lifecycle.md#choose-the-model-version-used-on-your-data). 
| `2022-05-01` | ### Headers @@ -35,4 +34,4 @@ Use the following header to authenticate your request. |--|--| |`Ocp-Apim-Subscription-Key`| The key to your resource. Used for authenticating your API requests.| -Once you send your API request, you will receive a 204 response indicating success, which means your training job has been canceled. +After you send your API request, you'll receive a 202 response with an `Operation-Location` header used to check the status of the job. diff --git a/articles/cognitive-services/language-service/custom-named-entity-recognition/includes/rest-api/create-project.md b/articles/cognitive-services/language-service/custom-named-entity-recognition/includes/rest-api/create-project.md index 0d153efe901f..96dfe10babb8 100644 --- a/articles/cognitive-services/language-service/custom-named-entity-recognition/includes/rest-api/create-project.md +++ b/articles/cognitive-services/language-service/custom-named-entity-recognition/includes/rest-api/create-project.md @@ -8,21 +8,27 @@ ms.topic: include ms.date: 04/06/2022 ms.author: aahi --- +To start creating a custom named entity recognition model, you need to create a project. Creating a project will let you label data, train, evaluate, improve, and deploy your models. > [!NOTE] -> The project name is case sensitive for all operations. +> The project name is case-sensitive for all operations. -Create a **POST** request using the following URL, headers, and JSON body to create your project and import the tags file. +Create a **PATCH** request using the following URL, headers, and JSON body to create your project. -Use the following URL to create a project and import your tags file. Replace the placeholder values below with your own values. +### Request URL + +Use the following URL to create a project. Replace the placeholder values below with your own values. ```rest -{YOUR-ENDPOINT}/language/analyze-text/projects/{projectName}/:import?api-version=2021-11-01-preview +{Endpoint}/language/authoring/analyze-text/projects/{projectName}?api-version={API-VERSION} ``` |Placeholder |Value | Example | |---------|---------|---------| -|`{YOUR-ENDPOINT}` | The endpoint for authenticating your API request. | `https://.cognitiveservices.azure.com` | +|`{ENDPOINT}` | The endpoint for authenticating your API request. | `https://.cognitiveservices.azure.com` | +|`{PROJECT-NAME}` | The name for your project. This value is case-sensitive. | `myProject` | +|`{API-VERSION}` | The version of the API you are calling. The value referenced here is for the latest version released. See [Model lifecycle](../../../concepts/model-lifecycle.md#choose-the-model-version-used-on-your-data) to learn more about other available API versions. | `2022-05-01` | + ### Headers @@ -34,91 +40,34 @@ Use the following header to authenticate your request. ### Body -Use the following JSON in your request. Replace the placeholder values below with your own values. Use the tags file available in the [sample data](https://github.com/Azure-Samples/cognitive-services-sample-data-files) tab +Use the following JSON in your request. Replace the placeholder values below with your own values. 
```json { - "api-version": "2021-11-01-preview", - "metadata": { - "name": "MyProject", - "multiLingual": true, - "description": "Trying out custom NER", - "modelType": "Extraction", - "language": "string", - "storageInputContainerName": "YOUR-CONTAINER-NAME", - "settings": {} - }, - "assets": { - "extractors": [ - { - "name": "Entity1" - }, - { - "name": "Entity2" - } - ], - "documents": [ - { - "location": "doc1.txt", - "language": "en-us", - "dataset": "Train", - "extractors": [ - { - "regionOffset": 0, - "regionLength": 500, - "labels": [ - { - "extractorName": "Entity1", - "offset": 25, - "length": 10 - }, - { - "extractorName": "Entity2", - "offset": 120, - "length": 8 - } - ] - } - ] - }, - { - "location": "doc2.txt", - "language": "en-us", - "dataset": "Test", - "extractors": [ - { - "regionOffset": 0, - "regionLength": 100, - "labels": [ - { - "extractorName": "Entity2", - "offset": 20, - "length": 5 - } - ] - } - ] - } - ] - } + "projectName": "{PROJECT-NAME}", + "language": "{LANGUAGE-CODE}", + "projectKind": "CustomEntityRecognition", + "description": "Project description", + "multilingual": true, + "storageInputContainerName": "{CONTAINER-NAME}" } + ``` -For the metadata key: -|Key |Value | Example | -|---------|---------|---------| -| `modelType` | Your Model type. | Extraction | -|`storageInputContainerName` | The name of your Azure blob storage container. | `myContainer` | +|Key |Placeholder|Value | Example | +|---------|---------|---------|--| +| projectName | `{PROJECT-NAME}` | The name of your project. This value is case-sensitive. | `myProject` | +| language | `{LANGUAGE-CODE}` | A string specifying the language code for the documents used in your project. If your project is a multilingual project, choose the language code of the majority of the documents. See [language support](../../language-support.md) to learn more about supported language codes. |`en-us`| +| projectKind | `CustomEntityRecognition` | Your project kind. | `CustomEntityRecognition` | +| multilingual | `true`| A boolean value that enables you to have documents in multiple languages in your dataset and when your model is deployed you can query the model in any supported language (not necessarily included in your training documents). See [language support](../../language-support.md#multi-lingual-option) to learn more about multilingual support. | `true`| +| storageInputContainerName | `{CONTAINER-NAME}` | The name of your Azure storage container where you have uploaded your documents. | `myContainer` | -For the documents key: -|Key |Value | Example | -|---------|---------|---------| -| `location` | Document name on the blob store. | `doc2.txt` | -|`language` | The language of the document. | `en-us` | -|`dataset` | Optional field to specify the dataset which this document will belong to. | `Train` or `Test` | -This request will return an error if: +This request will return a 201 response, which means that the project is created. + +This request will return an error if: * The selected resource doesn't have proper permission for the storage account.
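A minimal C# sketch of the custom NER create-project call described above, assuming the `2022-05-01` API version; the endpoint, key, project name, and container name are placeholders, and `multilingual` is sent as a boolean to match the key table.

```csharp
// Sketch only: issue the PATCH request that creates a custom NER project.
// A 201 response indicates the project was created.
using System;
using System.Net.Http;
using System.Text;
using System.Threading.Tasks;

class CreateNerProjectSketch
{
    static async Task Main()
    {
        const string endpoint = "https://<your-resource>.cognitiveservices.azure.com"; // placeholder endpoint
        const string projectName = "myProject";                                        // placeholder project name

        using var client = new HttpClient();
        client.DefaultRequestHeaders.Add("Ocp-Apim-Subscription-Key", "<your-resource-key>"); // placeholder key

        // Body mirrors the sample JSON above.
        var body = new StringContent(
            @"{ ""projectName"": ""myProject"", ""language"": ""en-us"",
                ""projectKind"": ""CustomEntityRecognition"",
                ""description"": ""Project description"",
                ""multilingual"": true,
                ""storageInputContainerName"": ""<your-container-name>"" }",
            Encoding.UTF8, "application/json");

        var url = $"{endpoint}/language/authoring/analyze-text/projects/{projectName}?api-version=2022-05-01";
        var response = await client.PatchAsync(url, body);
        Console.WriteLine((int)response.StatusCode); // expect 201 on success
    }
}
```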
+ diff --git a/articles/cognitive-services/language-service/custom-named-entity-recognition/includes/rest-api/delete-deployment.md b/articles/cognitive-services/language-service/custom-named-entity-recognition/includes/rest-api/delete-deployment.md index c7e5444c999d..e6ed2ba22f60 100644 --- a/articles/cognitive-services/language-service/custom-named-entity-recognition/includes/rest-api/delete-deployment.md +++ b/articles/cognitive-services/language-service/custom-named-entity-recognition/includes/rest-api/delete-deployment.md @@ -16,7 +16,7 @@ Create a **DELETE** request using the following URL, headers, and JSON body to d ### Request URL ```rest -{ENDPOINT}/language/analyze-text/projects/{PROJECT-NAME}/deployments/{deploymentName}?api-version={API-VERSION} +{Endpoint}/language/authoring/analyze-text/projects/{PROJECT-NAME}/deployments/{deploymentName}?api-version={API-VERSION} ``` |Placeholder |Value | Example | @@ -24,8 +24,7 @@ Create a **DELETE** request using the following URL, headers, and JSON body to d |`{ENDPOINT}` | The endpoint for authenticating your API request. | `https://.cognitiveservices.azure.com` | |`{PROJECT-NAME}` | The name for your project. This value is case-sensitive. | `myProject` | |`{DEPLOYMENT-NAME}` | The name for your deployment name. This value is case-sensitive. | `prod` | -|`{API-VERSION}` | The version of the API you are calling. The value referenced here is for the latest version released. Learn more about other available [API versions](../../../concepts/model-lifecycle.md#api-versions) | `2022-03-01-preview` | - +|`{API-VERSION}` | The version of the API you are calling. The value referenced here is for the latest version released. See [Model lifecycle](../../../concepts/model-lifecycle.md#choose-the-model-version-used-on-your-data) to learn more about other available API versions. | `2022-05-01` | ### Headers @@ -34,7 +33,8 @@ Use the following header to authenticate your request. |Key|Value| |--|--| |`Ocp-Apim-Subscription-Key`| The key to your resource. Used for authenticating your API requests.| -|`Content-Type` | application/json | -Once you send your API request, you will receive a `202` response indicating success, which means your deployment has been deleted. +Once you send your API request, you will receive a `202` response indicating success, which means your deployment has been deleted. A successful call results with an `Operation-Location` header used to check the status of the job. + + diff --git a/articles/cognitive-services/language-service/custom-named-entity-recognition/includes/rest-api/delete-model.md b/articles/cognitive-services/language-service/custom-named-entity-recognition/includes/rest-api/delete-model.md index 701eb26e050a..adba4130cc77 100644 --- a/articles/cognitive-services/language-service/custom-named-entity-recognition/includes/rest-api/delete-model.md +++ b/articles/cognitive-services/language-service/custom-named-entity-recognition/includes/rest-api/delete-model.md @@ -10,15 +10,13 @@ ms.date: 05/09/2022 ms.author: aahi --- - - -Create a **DELETE** request using the following URL, headers, and JSON body to delete a model. +Create a **DELETE** request using the following URL, headers, and JSON body to delete a trained model. 
### Request URL ```rest -{ENDPOINT}/language/analyze-text/projects/{PROJECT-NAME}/models/{trainedModelLabel}?api-version={API-VERSION} +{Endpoint}/language/authoring/analyze-text/projects/{PROJECT-NAME}/models/{trainedModelLabel}?api-version={API-VERSION} ``` |Placeholder |Value | Example | @@ -26,8 +24,7 @@ Create a **DELETE** request using the following URL, headers, and JSON body to d |`{ENDPOINT}` | The endpoint for authenticating your API request. | `https://.cognitiveservices.azure.com` | |`{PROJECT-NAME}` | The name for your project. This value is case-sensitive. | `myProject` | |`{trainedModelLabel}` | The name for your model name. This value is case-sensitive. | `model1` | -|`{API-VERSION}` | The version of the API you are calling. The value referenced here is for the latest version released. Learn more about other available [API versions](../../../concepts/model-lifecycle.md#api-versions) | `2022-03-01-preview` | - +|`{API-VERSION}` | The version of the API you are calling. The value referenced here is for the latest version released. See [Model lifecycle](../../../concepts/model-lifecycle.md#choose-the-model-version-used-on-your-data) to learn more about other available API versions. | `2022-05-01` | ### Headers @@ -38,4 +35,4 @@ Use the following header to authenticate your request. |`Ocp-Apim-Subscription-Key`| The key to your resource. Used for authenticating your API requests.| -Once you send your API request, you will receive a `202` response indicating success, which means your model has been deleted. +Once you send your API request, you will receive a `204` response indicating success, which means your trained model has been deleted. diff --git a/articles/cognitive-services/language-service/custom-named-entity-recognition/includes/rest-api/delete-project.md b/articles/cognitive-services/language-service/custom-named-entity-recognition/includes/rest-api/delete-project.md index 093b037c5a56..b6014ca7afe4 100644 --- a/articles/cognitive-services/language-service/custom-named-entity-recognition/includes/rest-api/delete-project.md +++ b/articles/cognitive-services/language-service/custom-named-entity-recognition/includes/rest-api/delete-project.md @@ -12,15 +12,14 @@ ms.author: aahi When you no longer need your project, you can delete it with the following **DELETE** request. Replace the placeholder values with your own values. ```rest -{ENDPOINT}/language/authoring/analyze-text/projects/{PROJECT-NAME}?api-version={API-VERSION} +{Endpoint}/language/authoring/analyze-text/projects/{projectName}?api-version={API-VERSION} ``` |Placeholder |Value | Example | |---------|---------|---------| |`{ENDPOINT}` | The endpoint for authenticating your API request. | `https://.cognitiveservices.azure.com` | |`{PROJECT-NAME}` | The name for your project. This value is case-sensitive. | `myProject` | -|`{API-VERSION}` | The version of the API you are calling. The value referenced here is for the latest version released. Learn more about other available [API versions](../../../concepts/model-lifecycle.md#choose-the-model-version-used-on-your-data) | `2022-03-01-preview` | - +|`{API-VERSION}` | The version of the API you are calling. The value referenced here is for the latest version released. See [Model lifecycle](../../../concepts/model-lifecycle.md#choose-the-model-version-used-on-your-data) to learn more about other available API versions. | `2022-05-01` | ### Headers @@ -31,4 +30,4 @@ Use the following header to authenticate your request. 
|Ocp-Apim-Subscription-Key| The key to your resource. Used for authenticating your API requests.| -Once you send your API request, you will receive a `202` response indicating success, which means your project has been deleted. +Once you send your API request, you will receive a `202` response indicating success, which means your project has been deleted. A successful call results with an Operation-Location header used to check the status of the job. diff --git a/articles/cognitive-services/language-service/custom-named-entity-recognition/includes/rest-api/deploy-model.md b/articles/cognitive-services/language-service/custom-named-entity-recognition/includes/rest-api/deploy-model.md index f0555a0c5d8f..ff56bb64c7c8 100644 --- a/articles/cognitive-services/language-service/custom-named-entity-recognition/includes/rest-api/deploy-model.md +++ b/articles/cognitive-services/language-service/custom-named-entity-recognition/includes/rest-api/deploy-model.md @@ -13,7 +13,7 @@ ms.author: aahi Submit a **PUT** request using the following URL, headers, and JSON body to submit a deployment job. Replace the placeholder values below with your own values. ```rest -{ENDPOINT}/language/authoring/analyze-text/projects/{PROJECT-NAME}/deployments/{DEPLOYMENT-NAME}?api-version={API-VERSION} +{Endpoint}/language/authoring/analyze-text/projects/{projectName}/deployments/{deploymentName}?api-version={API-VERSION} ``` | Placeholder |Value | Example | @@ -21,8 +21,7 @@ Submit a **PUT** request using the following URL, headers, and JSON body to subm | `{ENDPOINT}` | The endpoint for authenticating your API request. | `https://.cognitiveservices.azure.com` | | `{PROJECT-NAME}` | The name of your project. This value is case-sensitive. | `myProject` | | `{DEPLOYMENT-NAME}` | The name of your deployment. This value is case-sensitive. | `staging` | -|`{API-VERSION}` | The version of the API you are calling. The value referenced here is for the latest version released. Learn more about other available [API versions](../../../concepts/model-lifecycle.md#choose-the-model-version-used-on-your-data) | `2022-03-01-preview` | - +|`{API-VERSION}` | The version of the API you are calling. The value referenced here is for the latest version released. See [Model lifecycle](../../../concepts/model-lifecycle.md#choose-the-model-version-used-on-your-data) to learn more about other available API versions. | `2022-05-01` | #### Headers @@ -46,10 +45,10 @@ Use the following JSON in the body of your request. Use the name of the model yo |---------|---------|-----|----| | trainedModelLabel | `{MODEL-NAME}` | The model name that will be assigned to your deployment. You can only assign successfully trained models. This value is case-sensitive. | `myModel` | -Once you send your API request, you’ll receive a `202` response indicating that the job was submitted correctly. In the response headers, extract the `location` value. It will be formatted like this: +Once you send your API request, you’ll receive a `202` response indicating that the job was submitted correctly. In the response headers, extract the `operation-location` value. It will be formatted like this: ```rest {ENDPOINT}/language/authoring/analyze-text/projects/{PROJECT-NAME}/deployments/{DEPLOYMENT-NAME}/jobs/{JOB-ID}?api-version={API-VERSION} ``` -`{JOB-ID}` is used to identify your request, since this operation is asynchronous. You can use this URL to get the deployment status. +`{JOB-ID}` is used to identify your request, since this operation is asynchronous. 
You can use this URL to get the deployment status. diff --git a/articles/cognitive-services/language-service/custom-named-entity-recognition/includes/rest-api/export-project.md b/articles/cognitive-services/language-service/custom-named-entity-recognition/includes/rest-api/export-project.md index 05b717ab973e..e8de2da47b6d 100644 --- a/articles/cognitive-services/language-service/custom-named-entity-recognition/includes/rest-api/export-project.md +++ b/articles/cognitive-services/language-service/custom-named-entity-recognition/includes/rest-api/export-project.md @@ -17,15 +17,14 @@ Create a **POST** request using the following URL, headers, and JSON body to exp Use the following URL when creating your API request. Replace the placeholder values below with your own values. ```rest -{ENDPOINT}/language/analyze-text/projects/{PROJECT-NAME}/:export?api-version={API-VERSION} +{ENDPOINT}/language/authoring/analyze-text/projects/{PROJECT-NAME}/:export?stringIndexType=Utf16CodeUnit&api-version={API-VERSION} ``` |Placeholder |Value | Example | |---------|---------|---------| |`{ENDPOINT}` | The endpoint for authenticating your API request. | `https://.cognitiveservices.azure.com` | |`{PROJECT-NAME}` | The name for your project. This value is case-sensitive. | `MyProject` | -|`{API-VERSION}` | The version of the API you are calling. The value referenced here is for the latest version released. Learn more about other available [API versions](../../../concepts/model-lifecycle.md#api-versions) | `2022-03-01-preview` | - +|`{API-VERSION}` | The version of the API you are calling. The value referenced here is the latest [model version](../../../concepts/model-lifecycle.md#choose-the-model-version-used-on-your-data) released. | `2022-05-01` | ### Headers @@ -51,4 +50,4 @@ Once you send your API request, you’ll receive a `202` response indicating tha {ENDPOINT}/language/authoring/analyze-text/projects/{PROJECT-NAME}/export/jobs/{JOB-ID}?api-version={API-VERSION} ``` -{JOB-ID} is used to identify your request, since this operation is asynchronous. You’ll use this URL to get the export job status. +`{JOB-ID}` is used to identify your request, since this operation is asynchronous. You’ll use this URL to get the export job status. diff --git a/articles/cognitive-services/language-service/custom-named-entity-recognition/includes/rest-api/get-deployment-status.md b/articles/cognitive-services/language-service/custom-named-entity-recognition/includes/rest-api/get-deployment-status.md index 2dce6782b23d..1f03f702088f 100644 --- a/articles/cognitive-services/language-service/custom-named-entity-recognition/includes/rest-api/get-deployment-status.md +++ b/articles/cognitive-services/language-service/custom-named-entity-recognition/includes/rest-api/get-deployment-status.md @@ -21,9 +21,8 @@ Use the following **GET** request to query the status of the deployment job. You | `{ENDPOINT}` | The endpoint for authenticating your API request. | `https://.cognitiveservices.azure.com` | | `{PROJECT-NAME}` | The name of your project. This value is case-sensitive. | `myProject` | | `{DEPLOYMENT-NAME}` | The name of your deployment. This value is case-sensitive. | `staging` | -|`{JOB-ID}` | The ID for locating your model's training status. This value is in the `location` header value you received in the previous step. | `xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxxx` | -|`{API-VERSION}` | The version of the API you are calling. The value referenced here is for the latest version released. 
Learn more about other available [API versions](../../../concepts/model-lifecycle.md#choose-the-model-version-used-on-your-data) | `2022-03-01-preview` | - +|`{JOB-ID}` | The ID for locating your model's training status. This is in the `location` header value you received in the previous step. | `xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxxx` | +|`{API-VERSION}` | The version of the API you are calling. The value referenced here is for the latest version released. See [Model lifecycle](../../../concepts/model-lifecycle.md#choose-the-model-version-used-on-your-data) to learn more about other available API versions. | `2022-05-01` | #### Headers @@ -36,7 +35,7 @@ Use the following header to authenticate your request. ### Response Body -Once you send the request, you will get the following response. Keep polling this endpoint until the **status** parameter changes to "succeeded". +Once you send the request, you will get the following response. Keep polling this endpoint until the **status** parameter changes to "succeeded". You should get a `200` code to indicate the success of the request. ```json { @@ -47,3 +46,4 @@ Once you send the request, you will get the following response. Keep polling thi "status":"running" } ``` + diff --git a/articles/cognitive-services/language-service/custom-named-entity-recognition/includes/rest-api/get-export-status.md b/articles/cognitive-services/language-service/custom-named-entity-recognition/includes/rest-api/get-export-status.md index fcb9fb3b49f7..08a21e709d2f 100644 --- a/articles/cognitive-services/language-service/custom-named-entity-recognition/includes/rest-api/get-export-status.md +++ b/articles/cognitive-services/language-service/custom-named-entity-recognition/includes/rest-api/get-export-status.md @@ -22,8 +22,7 @@ Use the following **GET** request to get the status of exporting your project as |`{ENDPOINT}` | The endpoint for authenticating your API request. | `https://.cognitiveservices.azure.com` | |`{PROJECT-NAME}` | The name of your project. This value is case-sensitive. | `myProject` | |`{JOB-ID}` | The ID for locating your model's training status. This is in the `location` header value you received in the previous step. | `xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxxx` | -|`{API-VERSION}` | The version of the API you are calling. The value referenced here is for the latest version released. Learn more about other available [API versions](../../../concepts/model-lifecycle.md#api-versions) | `2022-03-01-preview` | - +|`{API-VERSION}` | The version of the API you are calling. The value referenced here is for the latest version released. See [Model lifecycle](../../../concepts/model-lifecycle.md#choose-the-model-version-used-on-your-data) to learn more about other available API versions. | `2022-05-01` | ### Headers @@ -39,9 +38,9 @@ Use the following header to authenticate your request. 
{ "resultUrl": "{RESULT-URL}", "jobId": "string", - "createdDateTime": "2021-10-19T23:24:41.572Z", - "lastUpdatedDateTime": "2021-10-19T23:24:41.572Z", - "expirationDateTime": "2021-10-19T23:24:41.572Z", + "createdDateTime": "2021-10-19T23:24:41.572Z", + "lastUpdatedDateTime": "2021-10-19T23:24:41.572Z", + "expirationDateTime": "2021-10-19T23:24:41.572Z", "status": "unknown", "errors": [ { diff --git a/articles/cognitive-services/language-service/custom-named-entity-recognition/includes/rest-api/get-import-status.md b/articles/cognitive-services/language-service/custom-named-entity-recognition/includes/rest-api/get-import-status.md index 43573fb361d3..33d6875e4411 100644 --- a/articles/cognitive-services/language-service/custom-named-entity-recognition/includes/rest-api/get-import-status.md +++ b/articles/cognitive-services/language-service/custom-named-entity-recognition/includes/rest-api/get-import-status.md @@ -22,8 +22,7 @@ Use the following **GET** request to get the status of your importing your proje |`{ENDPOINT}` | The endpoint for authenticating your API request. | `https://.cognitiveservices.azure.com` | |`{PROJECT-NAME}` | The name of your project. This value is case-sensitive. | `myProject` | |`{JOB-ID}` | The ID for locating your model's training status. This value is in the `location` header value you received in the previous step. | `xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxxx` | -|`{API-VERSION}` | The version of the API you are calling. The value referenced here is for the latest version released. Learn more about other available [API versions](../../../concepts/model-lifecycle.md#choose-the-model-version-used-on-your-data) | `2022-03-01-preview` | - +|`{API-VERSION}` | The version of the API you are calling. The value referenced here is for the latest version released. See [Model lifecycle](../../../concepts/model-lifecycle.md#choose-the-model-version-used-on-your-data) to learn more about other available API versions. | `2022-05-01` | #### Headers @@ -32,3 +31,4 @@ Use the following header to authenticate your request. |Key|Value| |--|--| |`Ocp-Apim-Subscription-Key`| The key to your resource. Used for authenticating your API requests.| + diff --git a/articles/cognitive-services/language-service/custom-named-entity-recognition/includes/rest-api/get-project-details.md b/articles/cognitive-services/language-service/custom-named-entity-recognition/includes/rest-api/get-project-details.md index f7397748659b..04977c1f1ea6 100644 --- a/articles/cognitive-services/language-service/custom-named-entity-recognition/includes/rest-api/get-project-details.md +++ b/articles/cognitive-services/language-service/custom-named-entity-recognition/includes/rest-api/get-project-details.md @@ -19,9 +19,7 @@ Use the following **GET** request to get your project details. Replace the place |---------|---------|---------| |`{ENDPOINT}` | The endpoint for authenticating your API request. | `https://.cognitiveservices.azure.com` | |`{PROJECT-NAME}` | The name for your project. This value is case-sensitive. | `myProject` | -|`{API-VERSION}` | The version of the API you are calling. The value referenced here is for the latest version released. Learn more about other available [API versions](../../../concepts/model-lifecycle.md#api-versions) | `2022-03-01-preview` | - - +|`{API-VERSION}` | The version of the API you are calling. The value referenced here is for the latest version released. 
See [Model lifecycle](../../../concepts/model-lifecycle.md#choose-the-model-version-used-on-your-data) to learn more about other available API versions. | `2022-05-01` | #### Headers @@ -39,12 +37,12 @@ Use the following header to authenticate your request. "lastModifiedDateTime": "2021-10-19T23:24:41.572Z", "lastTrainedDateTime": "2021-10-19T23:24:41.572Z", "lastDeployedDateTime": "2021-10-19T23:24:41.572Z", - "modelType": "{MODEL-TYPE}", + "projectKind": "CustomEntityRecognition", "storageInputContainerName": "{CONTAINER-NAME}", - "name": "myProject", - "multiLingual": true, - "description": "string", - "language": "en-us", - "settings": {} + "projectName": "{PROJECT-NAME}", + "multilingual": false, + "description": "Project description", + "language": "{LANGUAGE-CODE}" } ``` + diff --git a/articles/cognitive-services/language-service/custom-named-entity-recognition/includes/rest-api/get-results.md b/articles/cognitive-services/language-service/custom-named-entity-recognition/includes/rest-api/get-results.md index 0ea16acbc6bf..79c066f5a91a 100644 --- a/articles/cognitive-services/language-service/custom-named-entity-recognition/includes/rest-api/get-results.md +++ b/articles/cognitive-services/language-service/custom-named-entity-recognition/includes/rest-api/get-results.md @@ -13,8 +13,14 @@ ms.author: aahi Use the following **GET** request to query the status/results of the custom entity recognition task. ```rest -{ENDPOINT}/text/analytics/v3.2-preview.2/analyze/jobs/{JOB-ID} +{ENDPOINT}/language/analyze-text/jobs/{JOB-ID}?api-version={API-VERSION} ``` + +|Placeholder |Value | Example | +|---------|---------|---------| +|`{ENDPOINT}` | The endpoint for authenticating your API request. | `https://.cognitiveservices.azure.com` | +|`{API-VERSION}` | The version of the API you are calling. The value referenced here is for the latest version released. See [Model lifecycle](../../../concepts/model-lifecycle.md#choose-the-model-version-used-on-your-data) to learn more about other available API versions. 
| `2022-05-01` | + #### Headers |Key|Value| @@ -27,69 +33,62 @@ The response will be a JSON document with the following parameters ```json { - "createdDateTime": "2021-05-19T14:32:25.578Z", - "displayName": "MyJobName", - "expirationDateTime": "2021-05-19T14:32:25.578Z", - "jobId": "3fa85f64-5717-4562-b3fc-2c963f66afa6", - "lastUpdateDateTime": "2021-05-19T14:32:25.578Z", - "status": "completed", - "errors": [], - "tasks": { - "details": { - "name": "{JOB-NAME}", - "lastUpdateDateTime": "2021-03-29T19:50:23Z", - "status": "completed" - }, - "completed": 1, - "failed": 0, - "inProgress": 0, - "total": 1, - "tasks": { - "customEntityRecognitionTasks": [ - { - "lastUpdateDateTime": "2021-05-19T14:32:25.579Z", - "name": "{JOB-NAME}", - "status": "completed", - "results": { - "documents": [ - { - "id": "{DOC-ID}", - "entities": [ - { - "text": "Government", - "category": "restaurant_name", - "offset": 23, - "length": 10, - "confidenceScore": 0.0551877357 - } - ], - "warnings": [] - }, - { - "id": "{DOC-ID}", - "entities": [ - { - "text": "David Schmidt", - "category": "artist", - "offset": 0, - "length": 13, - "confidenceScore": 0.8022353 - } - ], - "warnings": [] - } - ], - "errors": [], - "statistics": { - "documentsCount":0, - "validDocumentsCount":0, - "erroneousDocumentsCount":0, - "transactionsCount":0 + "createdDateTime": "2021-05-19T14:32:25.578Z", + "displayName": "MyJobName", + "expirationDateTime": "2021-05-19T14:32:25.578Z", + "jobId": "xxxx-xxxx-xxxxx-xxxxx", + "lastUpdateDateTime": "2021-05-19T14:32:25.578Z", + "status": "succeeded", + "tasks": { + "completed": 1, + "failed": 0, + "inProgress": 0, + "total": 1, + "items": [ + { + "kind": "EntityRecognitionLROResults", + "taskName": "Recognize Entities", + "lastUpdateDateTime": "2020-10-01T15:01:03Z", + "status": "succeeded", + "results": { + "documents": [ + { + "entities": [ + { + "category": "Event", + "confidenceScore": 0.61, + "length": 4, + "offset": 18, + "text": "trip" + }, + { + "category": "Location", + "confidenceScore": 0.82, + "length": 7, + "offset": 26, + "subcategory": "GPE", + "text": "Seattle" + }, + { + "category": "DateTime", + "confidenceScore": 0.8, + "length": 9, + "offset": 34, + "subcategory": "DateRange", + "text": "last week" } - } - } - ] + ], + "id": "1", + "warnings": [] + } + ], + "errors": [], + "modelVersion": "2020-04-01" } - } + } + ] + } +} + ``` diff --git a/articles/cognitive-services/language-service/custom-named-entity-recognition/includes/rest-api/get-training-status.md b/articles/cognitive-services/language-service/custom-named-entity-recognition/includes/rest-api/get-training-status.md index 875c47f47ae8..5df144cb6847 100644 --- a/articles/cognitive-services/language-service/custom-named-entity-recognition/includes/rest-api/get-training-status.md +++ b/articles/cognitive-services/language-service/custom-named-entity-recognition/includes/rest-api/get-training-status.md @@ -22,8 +22,7 @@ Use the following **GET** request to get the status of your model's training pro |`{ENDPOINT}` | The endpoint for authenticating your API request. | `https://.cognitiveservices.azure.com` | |`{PROJECT-NAME}` | The name of your project. This value is case-sensitive. | `myProject` | |`{JOB-ID}` | The ID for locating your model's training status. This value is in the `location` header value you received in the previous step. | `xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxxx` | -|`{API-VERSION}` | The version of the API you are calling. The value referenced here is for the latest version released. 
Learn more about other available [API versions](../../../concepts/model-lifecycle.md#choose-the-model-version-used-on-your-data) | `2022-03-01-preview` | - +|`{API-VERSION}` | The version of the API you are calling. The value referenced here is for the latest version released. See [Model lifecycle](../../../concepts/model-lifecycle.md#choose-the-model-version-used-on-your-data) to learn more about other available API versions. | `2022-05-01` | #### Headers @@ -39,39 +38,25 @@ Once you send the request, you’ll get the following response. ```json { - "jobs": [ - { - "result": { - "trainedModelLabel": "{MODEL-NAME}", - "trainingConfigVersion": "string", - "trainStatus": { - "percentComplete": 0, - "elapsedTime": "string" - }, - "evaluationStatus": { - "percentComplete": 0, - "elapsedTime": "string" - } - }, - "jobId": "string", - "createdDateTime": "2022-04-12T12:13:28.771Z", - "lastUpdatedDateTime": "2022-04-12T12:13:28.771Z", - "expirationDateTime": "2022-04-12T12:13:28.771Z", - "status": "unknown", - "warnings": [ - { - "code": "unknown", - "message": "string" - } - ], - "errors": [ - { - "code": "unknown", - "message": "string" - } - ] + "result": { + "modelLabel": "{MODEL-NAME}", + "trainingConfigVersion": "{CONFIG-VERSION}", + "estimatedEndDateTime": "2022-04-18T15:47:58.8190649Z", + "trainingStatus": { + "percentComplete": 3, + "startDateTime": "2022-04-18T15:45:06.8190649Z", + "status": "running" + }, + "evaluationStatus": { + "percentComplete": 0, + "status": "notStarted" } - ] + }, + "jobId": "{JOB-ID}", + "createdDateTime": "2022-04-18T15:44:44Z", + "lastUpdatedDateTime": "2022-04-18T15:45:48Z", + "expirationDateTime": "2022-04-25T15:44:44Z", + "status": "running" } ``` diff --git a/articles/cognitive-services/language-service/custom-named-entity-recognition/includes/rest-api/import-project.md b/articles/cognitive-services/language-service/custom-named-entity-recognition/includes/rest-api/import-project.md index 0e35074ac24d..707e3cc8b601 100644 --- a/articles/cognitive-services/language-service/custom-named-entity-recognition/includes/rest-api/import-project.md +++ b/articles/cognitive-services/language-service/custom-named-entity-recognition/includes/rest-api/import-project.md @@ -10,18 +10,19 @@ ms.date: 05/05/2022 ms.author: aahi --- -Submit a **POST** request using the following URL, headers, and JSON body to import your tags file. Make sure that your tags file follow the [accepted tags file format](../../concepts/data-formats.md). +Submit a **POST** request using the following URL, headers, and JSON body to import your labels file. Make sure that your labels file follow the [accepted format](../../concepts/data-formats.md). + +If a project with the same name already exists, the data of that project is replaced. ```rest -{ENDPOINT}/language/authoring/analyze-text/projects/{PROJECT-NAME}/:import?api-version={API-VERSION} +{Endpoint}/language/authoring/analyze-text/projects/{projectName}/:import?api-version={API-VERSION} ``` |Placeholder |Value | Example | |---------|---------|---------| |`{ENDPOINT}` | The endpoint for authenticating your API request. | `https://.cognitiveservices.azure.com` | |`{PROJECT-NAME}` | The name for your project. This value is case-sensitive. | `myProject` | -|`{API-VERSION}` | The version of the API you are calling. The value referenced here is for the latest released [model version](../../../concepts/model-lifecycle.md#choose-the-model-version-used-on-your-data). | `2022-03-01-preview` | - +|`{API-VERSION}` | The version of the API you are calling. 
The value referenced here is for the latest version released. See [Model lifecycle](../../../concepts/model-lifecycle.md#choose-the-model-version-used-on-your-data) to learn more about other available API versions. | `2022-05-01` | ### Headers @@ -31,16 +32,18 @@ Use the following header to authenticate your request. |--|--| |`Ocp-Apim-Subscription-Key`| The key to your resource. Used for authenticating your API requests.| + ### Body Use the following JSON in your request. Replace the placeholder values below with your own values. + ```json { - "api-version": "{API-VERSION}", + "projectFileVersion": "{API-VERSION}", "stringIndexType": "Utf16CodeUnit", "metadata": { "projectName": "{PROJECT-NAME}", - "projectKind": "customNamedEntityRecognition", + "projectKind": "CustomEntityRecognition", "description": "Trying out custom NER", "language": "{LANGUAGE-CODE}", "multilingual": true, @@ -48,6 +51,7 @@ Use the following JSON in your request. Replace the placeholder values below wit "settings": {} }, "assets": { + "projectKind": "CustomEntityRecognition", "entities": [ { "category": "Entity1" @@ -107,7 +111,7 @@ Use the following JSON in your request. Replace the placeholder values below wit |---------|---------|----------|--| | `api-version` | `{API-VERSION}` | The version of the API you are calling. The version used here must be the same API version in the URL. Learn more about other available [API versions](../../../concepts/model-lifecycle.md#choose-the-model-version-used-on-your-data) | `2022-03-01-preview` | | `projectName` | `{PROJECT-NAME}` | The name of your project. This value is case-sensitive. | `myProject` | -| `projectKind` | `customNamedEntityRecognition` | Your project kind. | `customNamedEntityRecognition` | +| `projectKind` | `CustomEntityRecognition` | Your project kind. | `CustomEntityRecognition` | | `language` | `{LANGUAGE-CODE}` | A string specifying the language code for the documents used in your project. If your project is a multilingual project, choose the [language code](../../language-support.md) of the majority of the documents. |`en-us`| | `multilingual` | `true`| A boolean value that enables you to have documents in multiple languages in your dataset and when your model is deployed you can query the model in any supported language (not necessarily included in your training documents. See [language support](../../language-support.md#multi-lingual-option) for information on multilingual support. | `true`| | `storageInputContainerName` | {CONTAINER-NAME} | The name of your Azure storage container where you have uploaded your documents. | `myContainer` | @@ -117,7 +121,7 @@ Use the following JSON in your request. Replace the placeholder values below wit | `dataset` | `{DATASET}` | The test set to which this file will go to when split before training. See [How to train a model](../../how-to/train-model.md#data-splitting) for more information on how your data is split. Possible values for this field are `Train` and `Test`. |`Train`| -Once you send your API request, you’ll receive a `202` response indicating that the job was submitted correctly. In the response headers, extract the `location` value. It will be formatted like this: +Once you send your API request, you’ll receive a `202` response indicating that the job was submitted correctly. In the response headers, extract the `operation-location` value. 
It will be formatted like this: ```rest {ENDPOINT}/language/authoring/analyze-text/projects/{PROJECT-NAME}/import/jobs/{JOB-ID}?api-version={API-VERSION} @@ -127,7 +131,7 @@ Once you send your API request, you’ll receive a `202` response indicating tha Possible error scenarios for this request: -* The selected resource doesn't have proper permission for the storage account. Learn more about [required permissions](../../how-to/create-project.md#create-a-language-resource) for storage account. +* The selected resource doesn't have [proper permissions](../../how-to/create-project.md#using-a-pre-existing-language-resource) for the storage account. * The `storageInputContainerName` specified doesn't exist. * Invalid language code is used, or if the language code type isn't string. -* `multilingual` value is string and not boolean. +* `multilingual` value is a string and not a boolean. diff --git a/articles/cognitive-services/language-service/custom-named-entity-recognition/includes/rest-api/model-evaluation.md b/articles/cognitive-services/language-service/custom-named-entity-recognition/includes/rest-api/model-evaluation.md index 9ff034fd24ed..ddd828c65545 100644 --- a/articles/cognitive-services/language-service/custom-named-entity-recognition/includes/rest-api/model-evaluation.md +++ b/articles/cognitive-services/language-service/custom-named-entity-recognition/includes/rest-api/model-evaluation.md @@ -18,7 +18,7 @@ Submit a **GET** request using the following URL, headers, and JSON body to get ### Request URL ```rest -{ENDPOINT}/language/analyze-text/projects/{PROJECT-NAME}/models/{trainedModelLabel}/evaluation/summary-result?api-version={API-VERSION} +{ENDPOINT}/language/authoring/analyze-text/projects/{PROJECT-NAME}/models/{trainedModelLabel}/evaluation/summary-result?api-version={API-VERSION} ``` |Placeholder |Value | Example | @@ -26,7 +26,7 @@ Submit a **GET** request using the following URL, headers, and JSON body to get |`{ENDPOINT}` | The endpoint for authenticating your API request. | `https://.cognitiveservices.azure.com` | |`{PROJECT-NAME}` | The name for your project. This value is case-sensitive. | `myProject` | |`{trainedModelLabel}` | The name for your trained model. This value is case-sensitive. | `Model1` | -|`{API-VERSION}` | The version of the API you are calling. The value referenced here is for the latest version released. Learn more about other available [API versions](../../../concepts/model-lifecycle.md#api-versions) | `2022-03-01-preview` | +|`{API-VERSION}` | The version of the API you are calling. See [Model lifecycle](../../../concepts/model-lifecycle.md#choose-the-model-version-used-on-your-data) to learn more about other available API versions. | `2022-05-01` | ### Headers @@ -43,8 +43,8 @@ Once you send the request, you will get the following response. 
```json { - "projectKind": "customNamedEntityRecognition", - "customNamedEntityRecognitionEvaluation": { + "projectKind": "CustomEntityRecognition", + "customEntityRecognitionEvaluation": { "confusionMatrix": { "additionalProp1": { "additionalProp1": { diff --git a/articles/cognitive-services/language-service/custom-named-entity-recognition/includes/rest-api/project-details.md b/articles/cognitive-services/language-service/custom-named-entity-recognition/includes/rest-api/project-details.md index 188a9f0ad7a9..dfa0b591e591 100644 --- a/articles/cognitive-services/language-service/custom-named-entity-recognition/includes/rest-api/project-details.md +++ b/articles/cognitive-services/language-service/custom-named-entity-recognition/includes/rest-api/project-details.md @@ -11,8 +11,7 @@ ms.date: 05/06/2022 ms.author: aahi --- - -To get custom named entity recognition project details, submit a **GET** request using the following URL and headers. Replace the placeholder values with your own values. +Use the following **GET** request to get your project details. Replace the placeholder values below with your own values. ```rest {ENDPOINT}/language/authoring/analyze-text/projects/{PROJECT-NAME}?api-version={API-VERSION} @@ -22,41 +21,39 @@ To get custom named entity recognition project details, submit a **GET** request |---------|---------|---------| |`{ENDPOINT}` | The endpoint for authenticating your API request. | `https://.cognitiveservices.azure.com` | |`{PROJECT-NAME}` | The name for your project. This value is case-sensitive. | `myProject` | -|`{API-VERSION}` | The version of the API you are calling. The value referenced here is for the latest released [model version](../../../concepts/model-lifecycle.md#choose-the-model-version-used-on-your-data). | `2022-03-01-preview` | +|`{API-VERSION}` | The version of the API you are calling. See [Model lifecycle](../../../concepts/model-lifecycle.md#choose-the-model-version-used-on-your-data) to learn more about other available API versions. | `2022-05-01` | -### Headers +#### Headers Use the following header to authenticate your request. |Key|Value| |--|--| -|Ocp-Apim-Subscription-Key| The key to your resource. Used for authenticating your API requests.| +|`Ocp-Apim-Subscription-Key`| The key to your resource. Used for authenticating your API requests.| -### Response Body +#### Response body -Once you send the request, you will get the following response. ```json -{ - "createdDateTime": "2022-04-23T13:39:09.384Z", - "lastModifiedDateTime": "2022-04-23T13:39:09.384Z", - "lastTrainedDateTime": "2022-04-23T13:39:09.384Z", - "lastDeployedDateTime": "2022-04-23T13:39:09.384Z", - "projectKind": "customNamedEntityRecognition", - "storageInputContainerName": "string", - "settings": {}, - "projectName": "string", - "multilingual": true, - "description": "string", - "language": "string" -} - + { + "createdDateTime": "2021-10-19T23:24:41.572Z", + "lastModifiedDateTime": "2021-10-19T23:24:41.572Z", + "lastTrainedDateTime": "2021-10-19T23:24:41.572Z", + "lastDeployedDateTime": "2021-10-19T23:24:41.572Z", + "projectKind": "CustomEntityRecognition", + "storageInputContainerName": "{CONTAINER-NAME}", + "projectName": "{PROJECT-NAME}", + "multilingual": false, + "description": "Project description", + "language": "{LANGUAGE-CODE}" + } ``` + |Value | Placeholder | Description | Example | |---------|---------|---------|---------| -| `projectKind` | `customNamedEntityRecognition` | Your project kind. 
| `customNamedEntityRecognition` | +| `projectKind` | `CustomEntityRecognition` | Your project kind. | `CustomEntityRecognition` | | `storageInputContainerName` | `{CONTAINER-NAME}` | The name of your Azure storage container where you have uploaded your documents. | `myContainer` | | `projectName` | `{PROJECT-NAME}` | The name of your project. This value is case-sensitive. | `myProject` | | `multilingual` | `true`| A boolean value that enables you to have documents in multiple languages in your dataset and when your model is deployed you can query the model in any supported language (not necessarily included in your training documents. For more information about multilingual support, see [Language support](../../language-support.md#multi-lingual-option). | `true`| | `language` | `{LANGUAGE-CODE}` | A string specifying the language code for the documents used in your project. If your project is a multilingual project, choose the [language code](../../language-support.md) of the majority of the documents. |`en-us`| -Once you send your API request, you will receive a `202` response indicating success and JSON response body with your project details. +Once you send your API request, you will receive a `200` response indicating success and JSON response body with your project details. diff --git a/articles/cognitive-services/language-service/custom-named-entity-recognition/includes/rest-api/submit-task.md b/articles/cognitive-services/language-service/custom-named-entity-recognition/includes/rest-api/submit-task.md index acc461c28d75..6045a864e54d 100644 --- a/articles/cognitive-services/language-service/custom-named-entity-recognition/includes/rest-api/submit-task.md +++ b/articles/cognitive-services/language-service/custom-named-entity-recognition/includes/rest-api/submit-task.md @@ -10,12 +10,17 @@ ms.date: 05/05/2022 ms.author: aahi --- -Use this **POST** request to submit an entity extraction task. +Use this **POST** request to start a text classification task. ```rest -{ENDPOINT}/text/analytics/v3.2-preview.2/analyze +{ENDPOINT}/language/analyze-text/jobs?api-version={API-VERSION} ``` +|Placeholder |Value | Example | +|---------|---------|---------| +|`{ENDPOINT}` | The endpoint for authenticating your API request. | `https://.cognitiveservices.azure.com` | +|`{API-VERSION}` | The version of the API you are calling. The value referenced here is for the latest version released. See [Model lifecycle](../../../concepts/model-lifecycle.md#choose-the-model-version-used-on-your-data) to learn more about other available API versions. | `2022-05-01` | + #### Headers |Key|Value| @@ -26,45 +31,46 @@ Use this **POST** request to submit an entity extraction task. 
```json { - "displayName": "{JOB-NAME}", - "analysisInput": { - "documents": [ - { - "id": "{DOC-ID}", - "language": "{LANGUAGE-CODE}", - "text": "{DOC-TEXT}" - }, - { - "id": "{DOC-ID}", - "language": "{LANGUAGE-CODE}", - "text": "{DOC-TEXT}" - } - ] - }, - "tasks": { - "customEntityRecognitionTasks": [ - { - "parameters": { - "project-name": "`{PROJECT-NAME}`", - "deployment-name": "`{DEPLOYMENT-NAME}`" - } - } - ] + "displayName": "Extracting entities", + "analysisInput": { + "documents": [ + { + "id": "1", + "language": "{LANGUAGE-CODE}", + "text": "Text1" + }, + { + "id": "2", + "language": "{LANGUAGE-CODE}", + "text": "Text2" + } + ] + }, + "tasks": [ + { + "kind": "CustomEntityRecognition", + "taskName": "Entity Recognition", + "parameters": { + "projectName": "{PROJECT-NAME}", + "deploymentName": "{DEPLOYMENT-NAME}" + } } + ] } ``` + |Key |Placeholder |Value | Example | |---------|---------|----------|--| | `displayName` | `{JOB-NAME}` | Your job name. | `MyJobName` | | `documents` | [{},{}] | List of documents to run tasks on. | `[{},{}]` | | `id` | `{DOC-ID}` | Document name or ID. | `doc1`| -| `language` | `{LANGUAGE-CODE}` | A string specifying the language code for the document. In case this key is not specified, the service will assume the default language of the project that was selected during project creation. See [language support](../../language-support.md) to learn more about supported language codes. |`en-us`| +| `language` | `{LANGUAGE-CODE}` | A string specifying the language code for the document. If this key isn't specified, the service will assume the default language of the project that was selected during project creation. See [language support](../../language-support.md) for a list of supported language codes. |`en-us`| | `text` | `{DOC-TEXT}` | Document task to run the tasks on. | `Lorem ipsum dolor sit amet` | -|`tasks`|`[]`| List of tasks we want to perform.|`[]`| -| |customEntityRecognitionTasks|Task identifer for task we want to perform. | | -|`parameters`|`[]`|List of parameters to pass to task|`[]`| +|`tasks`| | List of tasks we want to perform.|`[]`| +| `taskName`|`CustomEntityRecognition`|The task name|CustomEntityRecognition| +|`parameters`| |List of parameters to pass to the task.| | | `project-name` |`{PROJECT-NAME}` | The name for your project. This value is case-sensitive. | `myProject` | | `deployment-name` |`{DEPLOYMENT-NAME}` | The name of your deployment. This value is case-sensitive. | `prod` | @@ -75,7 +81,7 @@ You will receive a 202 response indicating that your task has been submitted suc `operation-location` is formatted like this: ```rest -{ENDPOINT}/text/analytics/v3.2-preview.2/analyze/jobs/{JOB-ID} +{ENDPOINT}/language/analyze-text/jobs/{JOB-ID}?api-version={API-VERSION} ``` You can use this URL to query the task completion status and get the results when task is completed. 
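As a quick illustration of the submit-and-poll pattern described above, the following Python sketch sends the entity recognition job and then polls the `operation-location` URL until the job reaches a terminal state. The endpoint, key, document text, project name, and deployment name are placeholders you would substitute with your own values.

```python
# Minimal sketch: submit a custom entity recognition job and poll for results.
# Assumes LANGUAGE_ENDPOINT and LANGUAGE_KEY are set in the environment;
# the document text, project name, and deployment name below are placeholders.
import os
import time
import requests

endpoint = os.environ["LANGUAGE_ENDPOINT"]
key = os.environ["LANGUAGE_KEY"]
headers = {"Ocp-Apim-Subscription-Key": key}

body = {
    "displayName": "Extracting entities",
    "analysisInput": {
        "documents": [
            {"id": "1", "language": "en-us", "text": "I took a trip to Seattle last week."}
        ]
    },
    "tasks": [
        {
            "kind": "CustomEntityRecognition",
            "taskName": "Entity Recognition",
            "parameters": {"projectName": "myProject", "deploymentName": "prod"},
        }
    ],
}

# Submit the job; a 202 response carries the polling URL in the
# operation-location response header.
submit = requests.post(
    f"{endpoint}/language/analyze-text/jobs",
    params={"api-version": "2022-05-01"},
    headers=headers,
    json=body,
)
submit.raise_for_status()
job_url = submit.headers["operation-location"]

# Poll the job URL until it reaches a terminal state.
while True:
    job = requests.get(job_url, headers=headers).json()
    if job["status"] in ("succeeded", "failed", "cancelled"):
        break
    time.sleep(2)

print(job["status"])
```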
diff --git a/articles/cognitive-services/language-service/custom-named-entity-recognition/includes/rest-api/swap-deployment.md b/articles/cognitive-services/language-service/custom-named-entity-recognition/includes/rest-api/swap-deployment.md index 2447a4ecc3f9..3b8ed42317b6 100644 --- a/articles/cognitive-services/language-service/custom-named-entity-recognition/includes/rest-api/swap-deployment.md +++ b/articles/cognitive-services/language-service/custom-named-entity-recognition/includes/rest-api/swap-deployment.md @@ -25,7 +25,7 @@ Create a **POST** request using the following URL, headers, and JSON body to sta |---------|---------|---------| |`{ENDPOINT}` | The endpoint for authenticating your API request. | `https://.cognitiveservices.azure.com` | |`{PROJECT-NAME}` | The name for your project. This value is case-sensitive. | `myProject` | -|`{API-VERSION}` | The version of the API you are calling. The value referenced here is for the latest version released. Learn more about other available [API versions](../../../concepts/model-lifecycle.md#api-versions) | `2022-03-01-preview` | +|`{API-VERSION}` | The version of the API you are calling. The value referenced here is for the latest [model version](../../../concepts/model-lifecycle.md#choose-the-model-version-used-on-your-data) released. | `2022-05-01` | ### Headers @@ -35,9 +35,9 @@ Use the following header to authenticate your request. |Key|Value| |--|--| |`Ocp-Apim-Subscription-Key`| The key to your resource. Used for authenticating your API requests.| -|`Content-Type` | application/json | -### Request body + +### Request Body ```json { @@ -47,10 +47,10 @@ Use the following header to authenticate your request. ``` -|Key| value| Example| -|--|--|--| -|firstDeploymentName | The name for your first deployment. This value is case-sensitive. | `production` | -|secondDeploymentName | The name for your second deployment. This value is case-sensitive. | `staging` | +|Key|Placeholder| Value| Example| +|--|--|--|--| +|firstDeploymentName |`{FIRST-DEPLOYMENT-NAME}`| The name for your first deployment. This value is case-sensitive. | `production` | +|secondDeploymentName | `{SECOND-DEPLOYMENT-NAME}`|The name for your second deployment. This value is case-sensitive. | `staging` | Once you send your API request, you will receive a `202` response indicating success. diff --git a/articles/cognitive-services/language-service/custom-named-entity-recognition/includes/rest-api/train-model.md b/articles/cognitive-services/language-service/custom-named-entity-recognition/includes/rest-api/train-model.md index 42b01d6d8eaa..2c4ae74a9be4 100644 --- a/articles/cognitive-services/language-service/custom-named-entity-recognition/includes/rest-api/train-model.md +++ b/articles/cognitive-services/language-service/custom-named-entity-recognition/includes/rest-api/train-model.md @@ -20,7 +20,7 @@ Submit a **POST** request using the following URL, headers, and JSON body to sub |---------|---------|---------| | `{ENDPOINT}` | The endpoint for authenticating your API request. | `https://.cognitiveservices.azure.com` | | `{PROJECT-NAME}` | The name of your project. This value is case-sensitive. | `myProject` | -|`{API-VERSION}` | The version of the API you are calling. The value referenced here is for the latest released [model version](../../../concepts/model-lifecycle.md#choose-the-model-version-used-on-your-data). | `2022-03-01-preview` | +|`{API-VERSION}` | The version of the API you are calling. The value referenced here is for the latest version released. 
See [Model lifecycle](../../../concepts/model-lifecycle.md#choose-the-model-version-used-on-your-data) to learn more about other available API versions. | `2022-05-01` | #### Headers @@ -34,27 +34,26 @@ Use the following header to authenticate your request. Use the following JSON in your request body. The model will be given the `{MODEL-NAME}` once training is complete. Only successful training jobs will produce models. + ```json { "modelLabel": "{MODEL-NAME}", "trainingConfigVersion": "{CONFIG-VERSION}", "evaluationOptions": { - "kind": "percentage", + "kind": "percentage", "trainingSplitPercentage": 80, "testingSplitPercentage": 20 - } } - ``` |Key |Placeholder |Value | Example | |---------|---------|-----|----| -| `modelLabel` | `{MODEL-NAME}` | The model name that will be assigned to your model once trained successfully. | `myModel` | -| `trainingConfigVersion` | `{CONFIG-VERSION}` | This is the [model version](../../../concepts/model-lifecycle.md) that will be used to train the model. | `2022-05-01` | -| `evaluationOptions` | `{}` | Option to split your data across training and testing sets. | `{}` | -| `kind` | `percentage` | Split methods. Possible Values are `percentage` or `manual`. See [How to train a model](../../how-to/train-model.md#data-splitting) for more information on how your data is split. |`percentage`| -| `trainingSplitPercentage` | `80`| Percentage of your tagged data to be included in the training set. Recommended value is `80`. | `80`| -| `testingSplitPercentage` | `20` | Percentage of your tagged data to be included in the testing set. Recommended value is `20`. | `20` | +| modelLabel | `{MODEL-NAME}` | The model name that will be assigned to your model once trained successfully. | `myModel` | +| trainingConfigVersion | `{CONFIG-VERSION}` | This is the [model version](../../../concepts/model-lifecycle.md) that will be used to train the model. | `2022-05-01`| +| evaluationOptions | | Option to split your data across training and testing sets. | `{}` | +| kind | `percentage` | Split methods. Possible values are `percentage` or `manual`. See [How to train a model](../../how-to/train-model.md#data-splitting) for more information. |`percentage`| +| trainingSplitPercentage | `80`| Percentage of your tagged data to be included in the training set. Recommended value is `80`. | `80`| +| testingSplitPercentage | `20` | Percentage of your tagged data to be included in the testing set. Recommended value is `20`. | `20` | > [!NOTE] > The `trainingSplitPercentage` and `testingSplitPercentage` are only required if `Kind` is set to `percentage` and the sum of both percentages should be equal to 100. 
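For reference, here is a minimal Python sketch of the training request with an 80/20 percentage split, assuming the same endpoint and key conventions as the sketches above. The project name, model name, and training configuration version are placeholders, and the `/:train` path segment follows the authoring URL pattern used elsewhere in these includes.

```python
# Minimal sketch: start a training job with an 80/20 percentage data split.
# Assumes LANGUAGE_ENDPOINT and LANGUAGE_KEY are set in the environment;
# the project name, model name, and config version below are placeholders.
import os
import requests

endpoint = os.environ["LANGUAGE_ENDPOINT"]
key = os.environ["LANGUAGE_KEY"]
project_name = "myProject"   # case-sensitive

body = {
    "modelLabel": "myModel",
    "trainingConfigVersion": "2022-05-01",
    "evaluationOptions": {
        "kind": "percentage",
        "trainingSplitPercentage": 80,
        "testingSplitPercentage": 20,
    },
}

response = requests.post(
    f"{endpoint}/language/authoring/analyze-text/projects/{project_name}/:train",
    params={"api-version": "2022-05-01"},
    headers={"Ocp-Apim-Subscription-Key": key},
    json=body,
)
# A 202 response means the job was accepted; the operation-location header
# points to the URL used to check training status.
print(response.status_code, response.headers.get("operation-location"))
```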
diff --git a/articles/cognitive-services/language-service/custom-named-entity-recognition/media/confusion-matrix-example.png b/articles/cognitive-services/language-service/custom-named-entity-recognition/media/confusion-matrix-example.png index e0ac8297da15..161b669bd5c2 100644 Binary files a/articles/cognitive-services/language-service/custom-named-entity-recognition/media/confusion-matrix-example.png and b/articles/cognitive-services/language-service/custom-named-entity-recognition/media/confusion-matrix-example.png differ diff --git a/articles/cognitive-services/language-service/custom-named-entity-recognition/media/review-predictions.png b/articles/cognitive-services/language-service/custom-named-entity-recognition/media/review-predictions.png index 1f8942a8cfb1..3e98d419233e 100644 Binary files a/articles/cognitive-services/language-service/custom-named-entity-recognition/media/review-predictions.png and b/articles/cognitive-services/language-service/custom-named-entity-recognition/media/review-predictions.png differ diff --git a/articles/cognitive-services/language-service/custom-named-entity-recognition/media/tag-options.png b/articles/cognitive-services/language-service/custom-named-entity-recognition/media/tag-options.png index a7586d561e16..b74d4e1ec7df 100644 Binary files a/articles/cognitive-services/language-service/custom-named-entity-recognition/media/tag-options.png and b/articles/cognitive-services/language-service/custom-named-entity-recognition/media/tag-options.png differ diff --git a/articles/cognitive-services/language-service/custom-named-entity-recognition/overview.md b/articles/cognitive-services/language-service/custom-named-entity-recognition/overview.md index 420a3cd12ead..32adc1d2e345 100644 --- a/articles/cognitive-services/language-service/custom-named-entity-recognition/overview.md +++ b/articles/cognitive-services/language-service/custom-named-entity-recognition/overview.md @@ -79,7 +79,7 @@ As you use custom NER, see the following reference documentation and samples for ## Responsible AI -An AI system includes not only the technology, but also the people who will use it, the people who will be affected by it, and the environment in which it is deployed. Read the [transparency note for custom NER]() to learn about responsible AI use and deployment in your systems. You can also see the following articles for more information: +An AI system includes not only the technology, but also the people who will use it, the people who will be affected by it, and the environment in which it is deployed. Read the [transparency note for custom NER](/legal/cognitive-services/language-service/cner-transparency-note?context=/azure/cognitive-services/language-service/context/context) to learn about responsible AI use and deployment in your systems. You can also see the following articles for more information: [!INCLUDE [Responsible AI links](../includes/overview-responsible-ai-links.md)] diff --git a/articles/cognitive-services/language-service/custom-text-classification/how-to/tag-data.md b/articles/cognitive-services/language-service/custom-text-classification/how-to/tag-data.md index 30c1b7896153..3e0c95f6e8ae 100644 --- a/articles/cognitive-services/language-service/custom-text-classification/how-to/tag-data.md +++ b/articles/cognitive-services/language-service/custom-text-classification/how-to/tag-data.md @@ -58,8 +58,6 @@ Use the following steps to label your data: 4. In the right side pane, **Add class** to your project so you can start labeling your data with them. 
- :::image type="content" source="../media/tag-1.png" alt-text="A screenshot showing the data labeling screen" lightbox="../media/tag-1.png"::: - 5. Start labeling your files. # [Multi label classification](#tab/multi-classification) diff --git a/articles/cognitive-services/language-service/custom-text-classification/includes/resource-creation-azure-portal.md b/articles/cognitive-services/language-service/custom-text-classification/includes/resource-creation-azure-portal.md index 9c6158305548..f563e284f71b 100644 --- a/articles/cognitive-services/language-service/custom-text-classification/includes/resource-creation-azure-portal.md +++ b/articles/cognitive-services/language-service/custom-text-classification/includes/resource-creation-azure-portal.md @@ -32,11 +32,11 @@ ms.custom: language-service-custom-classification, event-tier1-build-2022 |Location | Learn more about [supported regions](../service-limits.md#regional-availability). | |Pricing tier | Learn more about [supported pricing tiers](../service-limits.md#pricing-tiers). | -7. In the **Custom text classification & custom named entity recognition** section, select an existing storage account or select **Create a new storage account**. Note that these values are to help you get started, and not necessarily the [storage account values](/azure/storage/common/storage-account-overview) you’ll want to use in production environments. To avoid latency during building your project connect to storage accounts in the same region as your Language resource. +7. In the **Custom text classification & custom named entity recognition** section, select an existing storage account or select **Create a new storage account**. Note that these values are to help you get started, and not necessarily the [storage account values](../../../../storage/common/storage-account-overview.md) you’ll want to use in production environments. To avoid latency during building your project connect to storage accounts in the same region as your Language resource. |Storage account value |Recommended value | |---------|---------| | Name | Any name | | Account kind| Storage (general purpose v1) | | Performance | Standard | - | Replication | Locally redundant storage (LRS) | + | Replication | Locally redundant storage (LRS) | \ No newline at end of file diff --git a/articles/cognitive-services/language-service/custom-text-classification/includes/rest-api/cancel-training.md b/articles/cognitive-services/language-service/custom-text-classification/includes/rest-api/cancel-training.md index 61249eeb3c01..96fa6a13bc24 100644 --- a/articles/cognitive-services/language-service/custom-text-classification/includes/rest-api/cancel-training.md +++ b/articles/cognitive-services/language-service/custom-text-classification/includes/rest-api/cancel-training.md @@ -17,7 +17,7 @@ Create a **POST** request by using the following URL, headers, and JSON body to Use the following URL when creating your API request. Replace the placeholder values below with your own values. ```rest -{ENDPOINT}/language/analyze-text/projects/{PROJECT-NAME}/train/jobs/{JOB-ID}/:cancel?api-version={API-VERSION} +{Endpoint}/language/authoring/analyze-text/projects/{PROJECT-NAME}/train/jobs/{JOB-ID}/:cancel?api-version={API-VERSION} ``` |Placeholder |Value | Example | @@ -25,7 +25,7 @@ Use the following URL when creating your API request. Replace the placeholder va |`{ENDPOINT}` | The endpoint for authenticating your API request. 
| `https://.cognitiveservices.azure.com` | |`{PROJECT-NAME}` | The name for your project. This value is case-sensitive. | `EmailApp` | |`{JOB-ID}` | This value is the training job ID.| `XXXXX-XXXXX-XXXX-XX`| -|`{API-VERSION}` | The version of the API you're calling. The value referenced is for the latest released [model version](../../../concepts/model-lifecycle.md#choose-the-model-version-used-on-your-data). | `2022-03-01-preview` | +|`{API-VERSION}` | The version of the API you're calling. The value referenced is for the latest released [model version](../../../concepts/model-lifecycle.md#choose-the-model-version-used-on-your-data). | `2022-05-01` | ### Headers @@ -35,4 +35,4 @@ Use the following header to authenticate your request. |--|--| |`Ocp-Apim-Subscription-Key`| The key to your resource. Used for authenticating your API requests.| -After you send your API request, you'll receive a 204 response indicating success, which means your training job has been canceled. +After you send your API request, you'll receive a 202 response with an `Operation-Location` header used to check the status of the job. diff --git a/articles/cognitive-services/language-service/custom-text-classification/includes/rest-api/create-project.md b/articles/cognitive-services/language-service/custom-text-classification/includes/rest-api/create-project.md index 53b1cbba8d52..b7490b169ec7 100644 --- a/articles/cognitive-services/language-service/custom-text-classification/includes/rest-api/create-project.md +++ b/articles/cognitive-services/language-service/custom-text-classification/includes/rest-api/create-project.md @@ -12,19 +12,22 @@ To start creating a custom text classification model, you need to create a proje > [!NOTE] > The project name is case-sensitive for all operations. -Create a **POST** request using the following URL, headers, and JSON body to create your project and import the labels file. +Create a **PATCH** request using the following URL, headers, and JSON body to create your project. ### Request URL -Use the following URL to create a project and import your labels file. Replace the placeholder values below with your own values. +Use the following URL to create a project. Replace the placeholder values below with your own values. ```rest -{YOUR-ENDPOINT}/language/analyze-text/projects/{projectName}/:import?api-version=2021-11-01-preview +{Endpoint}/language/authoring/analyze-text/projects/{projectName}?api-version={API-VERSION} ``` |Placeholder |Value | Example | |---------|---------|---------| -|`{YOUR-ENDPOINT}` | The endpoint for authenticating your API request. | `https://.cognitiveservices.azure.com` | +|`{ENDPOINT}` | The endpoint for authenticating your API request. | `https://.cognitiveservices.azure.com` | +|`{PROJECT-NAME}` | The name for your project. This value is case-sensitive. | `myProject` | +|`{API-VERSION}` | The version of the API you are calling. The value referenced here is for the latest version released. See [Model lifecycle](../../../concepts/model-lifecycle.md#choose-the-model-version-used-on-your-data) to learn more about other available API versions. | `2022-05-01` | + ### Headers @@ -38,54 +41,54 @@ Use the following header to authenticate your request. Use the following JSON in your request. Replace the placeholder values below with your own values. 
+# [Multi label classification](#tab/multi-classification) + ```json { - "api-version": "2021-11-01-preview", - "metadata": { - "name": "MyProject", - "multiLingual": true, - "description": "Trying out custom text classification", - "modelType": "multiClassification", - "language": "string", - "storageInputContainerName": "YOUR-CONTAINER-NAME", - "settings": {} - }, - "assets": { - "classifiers": [ - { - "name": "Class1" - } - ], - "documents": [ - { - "location": "doc1.txt", - "language": "en-us", - "dataset": "Train", - "classifiers": [ - { - "classifierName": "Class1" - } - ] - } - ] - } + "projectName": "{PROJECT-NAME}", + "language": "{LANGUAGE-CODE}", + "projectKind": "customMultiLabelClassification", + "description": "Project description", + "multilingual": "True", + "storageInputContainerName": "{CONTAINER-NAME}" } + ``` -For the metadata key: -|Key |Value | Example | -|---------|---------|---------| -| `modelType ` | Your Model type, for single label classification use `singleClassification`. | multiClassification | -|`storageInputContainerName` | The name of your Azure blob storage container. | `myContainer` | +|Key |Placeholder|Value | Example | +|---------|---------|---------|--| +| projectName | `{PROJECT-NAME}` | The name of your project. This value is case-sensitive. | `myProject` | +| language | `{LANGUAGE-CODE}` | A string specifying the language code for the documents used in your project. If your project is a multilingual project, choose the language code of the majority of the documents. See [language support](../../language-support.md) to learn more about supported language codes. |`en-us`| +| projectKind | `customMultiLabelClassification` | Your project kind. | `customMultiLabelClassification` | +| multilingual | `true`| A boolean value that enables you to have documents in multiple languages in your dataset and when your model is deployed you can query the model in any supported language (not necessarily included in your training documents. See [language support](../../language-support.md#multi-lingual-option) to learn more about multilingual support. | `true`| +| storageInputContainerName | `{CONTAINER-NAME}` | The name of your Azure storage container where you have uploaded your documents. | `myContainer` | -For the documents key: +--- -|Key |Value | Example | -|---------|---------|---------| -| `location` | Document name on the blob store. | `doc2.txt` | -|`language` | The language of the document. | `en-us` | -|`dataset` | Optional field to specify the dataset that this document will belong to. | `Train` or `Test` | +# [Single label classification](#tab/single-classification) -This request will return an error if: +```json +{ + "projectName": "{PROJECT-NAME}", + "language": "{LANGUAGE-CODE}", + "projectKind": "customSingleLabelClassification", + "description": "Project description", + "multilingual": "True", + "storageInputContainerName": "{CONTAINER-NAME}" +} +``` +|Key |Placeholder|Value | Example | +|---------|---------|---------|--| +| projectName | `{PROJECT-NAME}` | The name of your project. This value is case-sensitive. | `myProject` | +| language | `{LANGUAGE-CODE}` | A string specifying the language code for the documents used in your project. If your project is a multilingual project, choose the language code of the majority of the documents. See [language support](../../language-support.md) to learn more about supported language codes. |`en-us`| +| projectKind | `customSingleLabelClassification` | Your project kind. 
| `customSingleLabelClassification` | +| multilingual | `true`| A boolean value that enables you to have documents in multiple languages in your dataset and when your model is deployed you can query the model in any supported language (not necessarily included in your training documents). See [language support](../../language-support.md#multi-lingual-option) to learn more about multilingual support. | `true`| +| storageInputContainerName | `{CONTAINER-NAME}` | The name of your Azure storage container where you have uploaded your documents. | `myContainer` | + +--- +This request will return a 201 response, which means that the project is created. + + +This request will return an error if: * The selected resource doesn't have proper permission for the storage account. diff --git a/articles/cognitive-services/language-service/custom-text-classification/includes/rest-api/delete-deployment.md b/articles/cognitive-services/language-service/custom-text-classification/includes/rest-api/delete-deployment.md index 5ad5b4254419..6a7835dfcae0 100644 --- a/articles/cognitive-services/language-service/custom-text-classification/includes/rest-api/delete-deployment.md +++ b/articles/cognitive-services/language-service/custom-text-classification/includes/rest-api/delete-deployment.md @@ -18,7 +18,7 @@ Create a **DELETE** request using the following URL, headers, and JSON body to d ### Request URL ```rest -{ENDPOINT}/language/analyze-text/projects/{PROJECT-NAME}/deployments/{deploymentName}?api-version={API-VERSION} +{Endpoint}/language/authoring/analyze-text/projects/{PROJECT-NAME}/deployments/{deploymentName}?api-version={API-VERSION} ``` |Placeholder |Value | Example | @@ -26,7 +26,7 @@ Create a **DELETE** request using the following URL, headers, and JSON body to d |`{ENDPOINT}` | The endpoint for authenticating your API request. | `https://.cognitiveservices.azure.com` | |`{PROJECT-NAME}` | The name for your project. This value is case-sensitive. | `myProject` | |`{DEPLOYMENT-NAME}` | The name of your deployment. This value is case-sensitive. | `prod` | -|`{API-VERSION}` | The version of the API you are calling. The value referenced here is for the latest version released. Learn more about other available [API versions](../../../concepts/model-lifecycle.md#choose-the-model-version-used-on-your-data) | `2022-03-01-preview` | +|`{API-VERSION}` | The version of the API you are calling. The value referenced here is for the latest version released. Learn more about other available [API versions](../../../concepts/model-lifecycle.md#choose-the-model-version-used-on-your-data) | `2022-05-01` | ### Headers @@ -37,4 +37,6 @@ Use the following header to authenticate your request. |--|--| |`Ocp-Apim-Subscription-Key`| The key to your resource. Used for authenticating your API requests.| -Once you send your API request, you will receive a `202` response indicating success, which means your deployment has been deleted. +Once you send your API request, you will receive a `202` response indicating success, which means your deployment has been deleted. A successful call results in an `Operation-Location` header used to check the status of the job.
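For reference, the following is a minimal sketch of the same delete-and-poll flow using Python's `requests` library. The endpoint, key, project name, and deployment name are placeholder values, and the polling loop assumes the job returned in the `Operation-Location` header reports a terminal `status` value such as `succeeded`, following the job-status pattern described in the other includes in this article.

```python
import time

import requests

# Placeholder values -- replace with your own Language resource and project details.
ENDPOINT = "https://<your-resource-name>.cognitiveservices.azure.com"
KEY = "<your-resource-key>"
PROJECT_NAME = "myProject"
DEPLOYMENT_NAME = "prod"
API_VERSION = "2022-05-01"

url = (
    f"{ENDPOINT}/language/authoring/analyze-text/projects/"
    f"{PROJECT_NAME}/deployments/{DEPLOYMENT_NAME}?api-version={API_VERSION}"
)
headers = {"Ocp-Apim-Subscription-Key": KEY}

# Submit the DELETE request; a 202 response means the deletion job was accepted.
response = requests.delete(url, headers=headers)
response.raise_for_status()

# Poll the job URL from the Operation-Location header until it reaches a final state.
# (Terminal status values other than "succeeded" are assumed here for completeness.)
job_url = response.headers["Operation-Location"]
while True:
    job = requests.get(job_url, headers=headers).json()
    status = job.get("status")
    if status in ("succeeded", "failed", "cancelled"):
        print(f"Deployment deletion job finished with status: {status}")
        break
    time.sleep(2)
```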
+ + diff --git a/articles/cognitive-services/language-service/custom-text-classification/includes/rest-api/delete-model.md b/articles/cognitive-services/language-service/custom-text-classification/includes/rest-api/delete-model.md index d05a551424a8..9edc29f98234 100644 --- a/articles/cognitive-services/language-service/custom-text-classification/includes/rest-api/delete-model.md +++ b/articles/cognitive-services/language-service/custom-text-classification/includes/rest-api/delete-model.md @@ -12,13 +12,13 @@ ms.author: aahi -Create a **DELETE** request using the following URL, headers, and JSON body to delete a model. +Create a **DELETE** request using the following URL, headers, and JSON body to delete a trained model. ### Request URL ```rest -{ENDPOINT}/language/analyze-text/projects/{PROJECT-NAME}/models/{trainedModelLabel}?api-version={API-VERSION} +{Endpoint}/language/authoring/analyze-text/projects/{PROJECT-NAME}/models/{trainedModelLabel}?api-version={API-VERSION} ``` |Placeholder |Value | Example | @@ -26,7 +26,7 @@ Create a **DELETE** request using the following URL, headers, and JSON body to d |`{ENDPOINT}` | The endpoint for authenticating your API request. | `https://.cognitiveservices.azure.com` | |`{PROJECT-NAME}` | The name for your project. This value is case-sensitive. | `myProject` | |`{trainedModelLabel}` | The name for your model name. This value is case-sensitive. | `model1` | -|`{API-VERSION}` | The version of the API you are calling. The value referenced here is for the latest version released. Learn more about other available [API versions](../../../concepts/model-lifecycle.md#choose-the-model-version-used-on-your-data) | `2022-03-01-preview` | +|`{API-VERSION}` | The version of the API you are calling. The value referenced here is for the latest version released. Learn more about other available [API versions](../../../concepts/model-lifecycle.md#choose-the-model-version-used-on-your-data) | `2022-05-01` | ### Headers @@ -37,4 +37,4 @@ Use the following header to authenticate your request. |`Ocp-Apim-Subscription-Key`| The key to your resource. Used for authenticating your API requests.| -Once you send your API request, you will receive a `202` response indicating success, which means your model has been deleted. +Once you send your API request, you will receive a `204` response indicating success, which means your trained model has been deleted. diff --git a/articles/cognitive-services/language-service/custom-text-classification/includes/rest-api/delete-project.md b/articles/cognitive-services/language-service/custom-text-classification/includes/rest-api/delete-project.md index 99589d0a4456..974394d4a0e3 100644 --- a/articles/cognitive-services/language-service/custom-text-classification/includes/rest-api/delete-project.md +++ b/articles/cognitive-services/language-service/custom-text-classification/includes/rest-api/delete-project.md @@ -12,14 +12,14 @@ ms.author: aahi When you no longer need your project, you can delete it with the following **DELETE** request. Replace the placeholder values with your own values. ```rest -{ENDPOINT}/language/authoring/analyze-text/projects/{PROJECT-NAME}?api-version={API-VERSION} +{Endpoint}/language/authoring/analyze-text/projects/{projectName}?api-version={API-VERSION} ``` |Placeholder |Value | Example | |---------|---------|---------| |`{ENDPOINT}` | The endpoint for authenticating your API request. | `https://.cognitiveservices.azure.com` | |`{PROJECT-NAME}` | The name for your project. This value is case-sensitive. 
| `myProject` | -|`{API-VERSION}` | The version of the API you are calling. The value referenced here is for the latest version released. Learn more about other available [API versions](../../../concepts/model-lifecycle.md#choose-the-model-version-used-on-your-data) | `2022-03-01-preview` | +|`{API-VERSION}` | The version of the API you are calling. The value referenced here is for the latest version released. Learn more about other available [API versions](../../../concepts/model-lifecycle.md#choose-the-model-version-used-on-your-data) | `2022-05-01` | ### Headers @@ -30,4 +30,4 @@ Use the following header to authenticate your request. |Ocp-Apim-Subscription-Key| The key to your resource. Used for authenticating your API requests.| -Once you send your API request, you will receive a `202` response indicating success, which means your project has been deleted. +Once you send your API request, you will receive a `202` response indicating success, which means your project has been deleted. A successful call results with an `Operation-Location` header used to check the status of the job. diff --git a/articles/cognitive-services/language-service/custom-text-classification/includes/rest-api/deploy-model.md b/articles/cognitive-services/language-service/custom-text-classification/includes/rest-api/deploy-model.md index d7dc85af40e5..c374499d7a43 100644 --- a/articles/cognitive-services/language-service/custom-text-classification/includes/rest-api/deploy-model.md +++ b/articles/cognitive-services/language-service/custom-text-classification/includes/rest-api/deploy-model.md @@ -13,7 +13,7 @@ ms.author: aahi Submit a **PUT** request using the following URL, headers, and JSON body to submit a deployment job. Replace the placeholder values below with your own values. ```rest -{ENDPOINT}/language/authoring/analyze-text/projects/{PROJECT-NAME}/deployments/{DEPLOYMENT-NAME}?api-version={API-VERSION} +{Endpoint}/language/authoring/analyze-text/projects/{projectName}/deployments/{deploymentName}?api-version={API-VERSION} ``` | Placeholder |Value | Example | @@ -21,7 +21,7 @@ Submit a **PUT** request using the following URL, headers, and JSON body to subm | `{ENDPOINT}` | The endpoint for authenticating your API request. | `https://.cognitiveservices.azure.com` | | `{PROJECT-NAME}` | The name of your project. This value is case-sensitive. | `myProject` | | `{DEPLOYMENT-NAME}` | The name of your deployment. This value is case-sensitive. | `staging` | -|`{API-VERSION}` | The version of the API you are calling. The value referenced here is for the latest version released. Learn more about other available [API versions](../../../concepts/model-lifecycle.md#choose-the-model-version-used-on-your-data) | `2022-03-01-preview` | +|`{API-VERSION}` | The version of the API you are calling. The value referenced here is for the latest version released. Learn more about other available [API versions](../../../concepts/model-lifecycle.md#choose-the-model-version-used-on-your-data) | `2022-05-01` | #### Headers @@ -45,7 +45,7 @@ Use the following JSON in the body of your request. Use the name of the model yo |---------|---------|-----|----| | trainedModelLabel | `{MODEL-NAME}` | The model name that will be assigned to your deployment. You can only assign successfully trained models. This value is case-sensitive. | `myModel` | -Once you send your API request, you’ll receive a `202` response indicating that the job was submitted correctly. In the response headers, extract the `location` value. 
It will be formatted like this: +Once you send your API request, you’ll receive a `202` response indicating that the job was submitted correctly. In the response headers, extract the `operation-location` value. It will be formatted like this: ```rest {ENDPOINT}/language/authoring/analyze-text/projects/{PROJECT-NAME}/deployments/{DEPLOYMENT-NAME}/jobs/{JOB-ID}?api-version={API-VERSION} diff --git a/articles/cognitive-services/language-service/custom-text-classification/includes/rest-api/export-project.md b/articles/cognitive-services/language-service/custom-text-classification/includes/rest-api/export-project.md index 1c6d058cca3a..60144a9f98c4 100644 --- a/articles/cognitive-services/language-service/custom-text-classification/includes/rest-api/export-project.md +++ b/articles/cognitive-services/language-service/custom-text-classification/includes/rest-api/export-project.md @@ -18,14 +18,14 @@ Create a **POST** request using the following URL, headers, and JSON body to exp Use the following URL when creating your API request. Replace the placeholder values below with your own values. ```rest -{ENDPOINT}/language/analyze-text/projects/{PROJECT-NAME}/:export?api-version={API-VERSION} +{ENDPOINT}/language/authoring/analyze-text/projects/{PROJECT-NAME}/:export?stringIndexType=Utf16CodeUnit&api-version={API-VERSION} ``` |Placeholder |Value | Example | |---------|---------|---------| |`{ENDPOINT}` | The endpoint for authenticating your API request. | `https://.cognitiveservices.azure.com` | |`{PROJECT-NAME}` | The name for your project. This value is case-sensitive. | `MyProject` | -|`{API-VERSION}` | The version of the API you are calling. The value referenced here is the latest [model version](../../../concepts/model-lifecycle.md#choose-the-model-version-used-on-your-data) released. | `2022-03-01-preview` | +|`{API-VERSION}` | The version of the API you are calling. The value referenced here is the latest [model version](../../../concepts/model-lifecycle.md#choose-the-model-version-used-on-your-data) released. | `2022-05-01` | ### Headers diff --git a/articles/cognitive-services/language-service/custom-text-classification/includes/rest-api/get-deployment-status.md b/articles/cognitive-services/language-service/custom-text-classification/includes/rest-api/get-deployment-status.md index 2b6d73dbe80f..2802c682324d 100644 --- a/articles/cognitive-services/language-service/custom-text-classification/includes/rest-api/get-deployment-status.md +++ b/articles/cognitive-services/language-service/custom-text-classification/includes/rest-api/get-deployment-status.md @@ -22,7 +22,7 @@ Use the following **GET** request to query the status of the deployment job. You | `{PROJECT-NAME}` | The name of your project. This value is case-sensitive. | `myProject` | | `{DEPLOYMENT-NAME}` | The name of your deployment. This value is case-sensitive. | `staging` | |`{JOB-ID}` | The ID for locating your model's training status. This is in the `location` header value you received in the previous step. | `xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxxx` | -|`{API-VERSION}` | The version of the API you are calling. The value referenced here is for the latest version released. Learn more about other available [API versions](../../../concepts/model-lifecycle.md#choose-the-model-version-used-on-your-data) | `2022-03-01-preview` | +|`{API-VERSION}` | The version of the API you are calling. The value referenced here is for the latest version released. 
Learn more about other available [API versions](../../../concepts/model-lifecycle.md#choose-the-model-version-used-on-your-data) | `2022-05-01` | #### Headers @@ -35,7 +35,7 @@ Use the following header to authenticate your request. ### Response Body -Once you send the request, you will get the following response. Keep polling this endpoint until the **status** parameter changes to "succeeded". +Once you send the request, you will get the following response. Keep polling this endpoint until the **status** parameter changes to "succeeded". You should get a `200` code to indicate the success of the request. ```json { diff --git a/articles/cognitive-services/language-service/custom-text-classification/includes/rest-api/get-export-status.md b/articles/cognitive-services/language-service/custom-text-classification/includes/rest-api/get-export-status.md index 0a42cbeeb725..2d1041f06e8f 100644 --- a/articles/cognitive-services/language-service/custom-text-classification/includes/rest-api/get-export-status.md +++ b/articles/cognitive-services/language-service/custom-text-classification/includes/rest-api/get-export-status.md @@ -22,7 +22,7 @@ Use the following **GET** request to get the status of exporting your project as |`{ENDPOINT}` | The endpoint for authenticating your API request. | `https://.cognitiveservices.azure.com` | |`{PROJECT-NAME}` | The name of your project. This value is case-sensitive. | `myProject` | |`{JOB-ID}` | The ID for locating your model's training status. This is in the `location` header value you received in the previous step. | `xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxxx` | -|`{API-VERSION}` | The version of the API you are calling. The value referenced here is for the latest version released. Learn more about other available [API versions](../../../concepts/model-lifecycle.md#choose-the-model-version-used-on-your-data) | `2022-03-01-preview` | +|`{API-VERSION}` | The version of the API you are calling. The value referenced here is for the latest version released. Learn more about other available [API versions](../../../concepts/model-lifecycle.md#choose-the-model-version-used-on-your-data) | `2022-05-01` | ### Headers @@ -38,9 +38,9 @@ Use the following header to authenticate your request. { "resultUrl": "{RESULT-URL}", "jobId": "string", - "createdDateTime": "2021-10-19T23:24:41.572Z", - "lastUpdatedDateTime": "2021-10-19T23:24:41.572Z", - "expirationDateTime": "2021-10-19T23:24:41.572Z", + "createdDateTime": "2021-10-19T23:24:41.572Z", + "lastUpdatedDateTime": "2021-10-19T23:24:41.572Z", + "expirationDateTime": "2021-10-19T23:24:41.572Z", "status": "unknown", "errors": [ { diff --git a/articles/cognitive-services/language-service/custom-text-classification/includes/rest-api/get-import-status.md b/articles/cognitive-services/language-service/custom-text-classification/includes/rest-api/get-import-status.md index 87a0fe43a83e..7f7f544fb05f 100644 --- a/articles/cognitive-services/language-service/custom-text-classification/includes/rest-api/get-import-status.md +++ b/articles/cognitive-services/language-service/custom-text-classification/includes/rest-api/get-import-status.md @@ -22,7 +22,7 @@ Use the following **GET** request to get the status of your importing your proje |`{ENDPOINT}` | The endpoint for authenticating your API request. | `https://.cognitiveservices.azure.com` | |`{PROJECT-NAME}` | The name of your project. This value is case-sensitive. | `myProject` | |`{JOB-ID}` | The ID for locating your model's training status. 
This value is in the `location` header value you received in the previous step. | `xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxxx` | -|`{API-VERSION}` | The version of the API you are calling. The value referenced here is for the latest version released. Learn more about other available [API versions](../../../concepts/model-lifecycle.md#choose-the-model-version-used-on-your-data) | `2022-03-01-preview` | +|`{API-VERSION}` | The version of the API you are calling. The value referenced here is for the latest version released. Learn more about other available [API versions](../../../concepts/model-lifecycle.md#choose-the-model-version-used-on-your-data) | `2022-05-01` | #### Headers diff --git a/articles/cognitive-services/language-service/custom-text-classification/includes/rest-api/get-project-details.md b/articles/cognitive-services/language-service/custom-text-classification/includes/rest-api/get-project-details.md index f74204df65b8..faab888e356b 100644 --- a/articles/cognitive-services/language-service/custom-text-classification/includes/rest-api/get-project-details.md +++ b/articles/cognitive-services/language-service/custom-text-classification/includes/rest-api/get-project-details.md @@ -20,7 +20,7 @@ Use the following **GET** request to get your project details. Replace the place |---------|---------|---------| |`{ENDPOINT}` | The endpoint for authenticating your API request. | `https://.cognitiveservices.azure.com` | |`{PROJECT-NAME}` | The name for your project. This value is case-sensitive. | `myProject` | -|`{API-VERSION}` | The version of the API you are calling. The value referenced here is for the latest version released. Learn more about other available [API versions](../../../concepts/model-lifecycle.md#choose-the-model-version-used-on-your-data) | `2022-03-01-preview` | +|`{API-VERSION}` | The version of the API you are calling. The value referenced here is for the latest version released. Learn more about other available [API versions](../../../concepts/model-lifecycle.md#choose-the-model-version-used-on-your-data) | `2022-05-01` | #### Headers @@ -38,12 +38,13 @@ Use the following header to authenticate your request. "lastModifiedDateTime": "2021-10-19T23:24:41.572Z", "lastTrainedDateTime": "2021-10-19T23:24:41.572Z", "lastDeployedDateTime": "2021-10-19T23:24:41.572Z", - "modelType": "{MODEL-TYPE}", + "projectKind": "customMultiLabelClassification", "storageInputContainerName": "{CONTAINER-NAME}", - "name": "myProject", - "multiLingual": true, - "description": "string", - "language": "en-us", - "settings": {} + "projectName": "{PROJECT-NAME}", + "multilingual": false, + "description": "Project description", + "language": "{LANGUAGE-CODE}" } ``` +Once you send your API request, you will receive a `200` response indicating success and JSON response body with your project details. + diff --git a/articles/cognitive-services/language-service/custom-text-classification/includes/rest-api/get-results.md b/articles/cognitive-services/language-service/custom-text-classification/includes/rest-api/get-results.md index 7ab2b71b81ae..21112c68c66b 100644 --- a/articles/cognitive-services/language-service/custom-text-classification/includes/rest-api/get-results.md +++ b/articles/cognitive-services/language-service/custom-text-classification/includes/rest-api/get-results.md @@ -9,10 +9,15 @@ ms.date: 05/04/2022 ms.author: aahi --- +Use the following **GET** request to query the status/results of the text classification task. 
-Use the following **GET** request to query the status/results of the custom classification task. You can use the endpoint you received from the previous step. - -`{ENDPOINT}/text/analytics/v3.2-preview.2/analyze/jobs/`. +```rest +{ENDPOINT}/language/analyze-text/jobs/{JOB-ID}?api-version={API-VERSION} +``` +|Placeholder |Value | Example | +|---------|---------|---------| +|`{ENDPOINT}` | The endpoint for authenticating your API request. | `https://.cognitiveservices.azure.com` | +|`{API-VERSION}` | The version of the API you are calling. The value referenced here is for the latest version released. See [Model lifecycle](../../../concepts/model-lifecycle.md#choose-the-model-version-used-on-your-data) for more information on other available API versions. | `2022-05-01` | #### Headers @@ -28,67 +33,44 @@ The response will be a JSON document with the following parameters. ```json { - "createdDateTime": "2021-05-19T14:32:25.578Z", - "displayName": "{JOB-NAME}", - "expirationDateTime": "2021-05-19T14:32:25.578Z", - "jobId": "3fa85f64-5717-4562-b3fc-2c963f66afa6", - "lastUpdateDateTime": "2021-05-19T14:32:25.578Z", - "status": "completed", - "errors": [], - "tasks": { - "details": { - "name": "{JOB-NAME}", - "lastUpdateDateTime": "2021-03-29T19:50:23Z", - "status": "completed" - }, - "completed": 1, - "failed": 0, - "inProgress": 0, - "total": 1, - "tasks": { - "customMultiClassificationTasks": [ - { - "lastUpdateDateTime": "2021-05-19T14:32:25.579Z", - "name": "{JOB-NAME}", - "status": "completed", - "results": { - "documents": [ - { - "id": "{DOC-ID}", - "classes": [ - { - "category": "Class_1", - "confidenceScore": 0.0551877357 - } - ], - "warnings": [] - }, - { - "id": "{DOC-ID}", - "classes": [ - { - "category": "Class_1", - "confidenceScore": 0.0551877357 - }, - { - "category": "Class_2", - "confidenceScore": 0.0551877357 - } - ], - "warnings": [] - } - ], - "errors": [], - "statistics": { - "documentsCount":0, - "erroneousDocumentsCount":0, - "transactionsCount":0 - } - } - } - ] + "createdDateTime": "2021-05-19T14:32:25.578Z", + "displayName": "MyJobName", + "expirationDateTime": "2021-05-19T14:32:25.578Z", + "jobId": "xxxx-xxxxxx-xxxxx-xxxx", + "lastUpdateDateTime": "2021-05-19T14:32:25.578Z", + "status": "succeeded", + "tasks": { + "completed": 1, + "failed": 0, + "inProgress": 0, + "total": 1, + "items": [ + { + "kind": "customMultiClassificationTasks", + "taskName": "Classify documents", + "lastUpdateDateTime": "2020-10-01T15:01:03Z", + "status": "succeeded", + "results": { + "documents": [ + { + "id": "{DOC-ID}", + "classes": [ + { + "category": "Class_1", + "confidenceScore": 0.0551877357 + } + ], + "warnings": [] + } + ], + "errors": [], + "modelVersion": "2020-04-01" } - } + } + ] + } +} + ``` # [Single label classification](#tab/single-classification) @@ -96,63 +78,44 @@ The response will be a JSON document with the following parameters. 
```json { - "createdDateTime": "2021-05-19T14:32:25.578Z", - "displayName": "{JOB-NAME}", - "expirationDateTime": "2021-05-19T14:32:25.578Z", - "jobId": "3fa85f64-5717-4562-b3fc-2c963f66afa6", - "lastUpdateDateTime": "2021-05-19T14:32:25.578Z", - "status": "completed", - "errors": [], - "tasks": { - "details": { - "name": "{JOB-NAME}", - "lastUpdateDateTime": "2021-03-29T19:50:23Z", - "status": "completed" - }, - "completed": 1, - "failed": 0, - "inProgress": 0, - "total": 1, - "tasks": { - "customSingleClassificationTasks": [ - { - "lastUpdateDateTime": "2021-05-19T14:32:25.579Z", - "name": "{JOB-NAME}", - "status": "completed", - "results": { - "documents": [ - { - "id": "{DOC-ID}", - "classes": [ - { - "category": "Class_1", - "confidenceScore": 0.0551877357 - } - ], - "warnings": [] - }, - { - "id": "{DOC-ID}", - "classes": [ - { - "category": "Class_2", - "confidenceScore": 0.0551877357 - } - ], - "warnings": [] - } - ], - "errors": [], - "statistics": { - "documentsCount":0, - "erroneousDocumentsCount":0, - "transactionsCount":0 - } - } - } - ] + "createdDateTime": "2021-05-19T14:32:25.578Z", + "displayName": "MyJobName", + "expirationDateTime": "2021-05-19T14:32:25.578Z", + "jobId": "xxxx-xxxxxx-xxxxx-xxxx", + "lastUpdateDateTime": "2021-05-19T14:32:25.578Z", + "status": "succeeded", + "tasks": { + "completed": 1, + "failed": 0, + "inProgress": 0, + "total": 1, + "items": [ + { + "kind": "customSingleClassificationTasks", + "taskName": "Classify documents", + "lastUpdateDateTime": "2020-10-01T15:01:03Z", + "status": "succeeded", + "results": { + "documents": [ + { + "id": "{DOC-ID}", + "classes": [ + { + "category": "Class_1", + "confidenceScore": 0.0551877357 + } + ], + "warnings": [] + } + ], + "errors": [], + "modelVersion": "2020-04-01" } - } + } + ] + } +} + ``` --- diff --git a/articles/cognitive-services/language-service/custom-text-classification/includes/rest-api/get-training-status.md b/articles/cognitive-services/language-service/custom-text-classification/includes/rest-api/get-training-status.md index d67f8776f9d4..43407cf354ff 100644 --- a/articles/cognitive-services/language-service/custom-text-classification/includes/rest-api/get-training-status.md +++ b/articles/cognitive-services/language-service/custom-text-classification/includes/rest-api/get-training-status.md @@ -22,7 +22,7 @@ Use the following **GET** request to get the status of your model's training pro |`{ENDPOINT}` | The endpoint for authenticating your API request. | `https://.cognitiveservices.azure.com` | |`{PROJECT-NAME}` | The name of your project. This value is case-sensitive. | `myProject` | |`{JOB-ID}` | The ID for locating your model's training status. This value is in the `location` header value you received in the previous step. | `xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxxx` | -|`{API-VERSION}` | The version of the API you are calling. The value referenced here is for the latest version released. Learn more about other available [API versions](../../../concepts/model-lifecycle.md#choose-the-model-version-used-on-your-data) | `2022-03-01-preview` | +|`{API-VERSION}` | The version of the API you are calling. The value referenced here is for the latest version released. See [model lifecycle](../../../concepts/model-lifecycle.md#choose-the-model-version-used-on-your-data) to learn more about other available API versions. | `2022-05-01` | #### Headers @@ -38,39 +38,25 @@ Once you send the request, you’ll get the following response. 
```json { - "jobs": [ - { - "result": { - "trainedModelLabel": "{MODEL-NAME}", - "trainingConfigVersion": "string", - "trainStatus": { - "percentComplete": 0, - "elapsedTime": "string" - }, - "evaluationStatus": { - "percentComplete": 0, - "elapsedTime": "string" - } - }, - "jobId": "string", - "createdDateTime": "2022-04-12T12:13:28.771Z", - "lastUpdatedDateTime": "2022-04-12T12:13:28.771Z", - "expirationDateTime": "2022-04-12T12:13:28.771Z", - "status": "unknown", - "warnings": [ - { - "code": "unknown", - "message": "string" - } - ], - "errors": [ - { - "code": "unknown", - "message": "string" - } - ] + "result": { + "modelLabel": "{MODEL-NAME}", + "trainingConfigVersion": "{CONFIG-VERSION}", + "estimatedEndDateTime": "2022-04-18T15:47:58.8190649Z", + "trainingStatus": { + "percentComplete": 3, + "startDateTime": "2022-04-18T15:45:06.8190649Z", + "status": "running" + }, + "evaluationStatus": { + "percentComplete": 0, + "status": "notStarted" } - ] + }, + "jobId": "{JOB-ID}", + "createdDateTime": "2022-04-18T15:44:44Z", + "lastUpdatedDateTime": "2022-04-18T15:45:48Z", + "expirationDateTime": "2022-04-25T15:44:44Z", + "status": "running" } ``` diff --git a/articles/cognitive-services/language-service/custom-text-classification/includes/rest-api/import-project.md b/articles/cognitive-services/language-service/custom-text-classification/includes/rest-api/import-project.md index 4716bfceae8c..4d72c9a68e43 100644 --- a/articles/cognitive-services/language-service/custom-text-classification/includes/rest-api/import-project.md +++ b/articles/cognitive-services/language-service/custom-text-classification/includes/rest-api/import-project.md @@ -11,16 +11,17 @@ ms.author: aahi Submit a **POST** request using the following URL, headers, and JSON body to import your labels file. Make sure that your labels file follow the [accepted format](../../concepts/data-formats.md). +If a project with the same name already exists, the data of that project is replaced. ```rest -{ENDPOINT}/language/authoring/analyze-text/projects/{PROJECT-NAME}/:import?api-version={API-VERSION} +{Endpoint}/language/authoring/analyze-text/projects/{projectName}/:import?api-version={API-VERSION} ``` |Placeholder |Value | Example | |---------|---------|---------| |`{ENDPOINT}` | The endpoint for authenticating your API request. | `https://.cognitiveservices.azure.com` | |`{PROJECT-NAME}` | The name for your project. This value is case-sensitive. | `myProject` | -|`{API-VERSION}` | The version of the API you are calling. The value referenced here is for the latest version released. Learn more about other available [API versions](../../../concepts/model-lifecycle.md#choose-the-model-version-used-on-your-data) | `2022-03-01-preview` | +|`{API-VERSION}` | The version of the API you are calling. The value referenced here is for the latest version released. Learn more about other available [API versions](../../../concepts/model-lifecycle.md#choose-the-model-version-used-on-your-data) | `2022-05-01` | ### Headers @@ -39,18 +40,19 @@ Use the following JSON in your request. 
Replace the placeholder values below wit ```json { - "api-version": "{API-VERSION}", + "projectFileVersion": "{API-VERSION}", "stringIndexType": "Utf16CodeUnit", "metadata": { "projectName": "{PROJECT-NAME}", + "storageInputContainerName": "{CONTAINER-NAME}", "projectKind": "customMultiLabelClassification", "description": "Trying out custom multi label text classification", "language": "{LANGUAGE-CODE}", "multilingual": true, - "storageInputContainerName": "{CONTAINER-NAME}", "settings": {} }, "assets": { + "projectKind": "customMultiLabelClassification", "classes": [ { "category": "Class1" @@ -90,12 +92,12 @@ Use the following JSON in your request. Replace the placeholder values below wit ``` |Key |Placeholder |Value | Example | |---------|---------|----------|--| -| api-version | `{API-VERSION}` | The version of the API you are calling. The version used here must be the same API version in the URL. Learn more about other available [API versions](../../../concepts/model-lifecycle.md#choose-the-model-version-used-on-your-data) | `2022-03-01-preview` | +| api-version | `{API-VERSION}` | The version of the API you are calling. The version used here must be the same API version in the URL. Learn more about other available [API versions](../../../concepts/model-lifecycle.md#choose-the-model-version-used-on-your-data) | `2022-05-01` | | projectName | `{PROJECT-NAME}` | The name of your project. This value is case-sensitive. | `myProject` | | projectKind | `customMultiLabelClassification` | Your project kind. | `customMultiLabelClassification` | | language | `{LANGUAGE-CODE}` | A string specifying the language code for the documents used in your project. If your project is a multilingual project, choose the language code of the majority of the documents. See [language support](../../language-support.md#multi-lingual-option) to learn more about multilingual support. |`en-us`| | multilingual | `true`| A boolean value that enables you to have documents in multiple languages in your dataset and when your model is deployed you can query the model in any supported language (not necessarily included in your training documents. See [language support](../../language-support.md#multi-lingual-option) to learn more about multilingual support. | `true`| -| storageInputContainerName | {CONTAINER-NAME} | The name of your Azure storage container where you have uploaded your documents. | `myContainer` | +| storageInputContainerName | `{CONTAINER-NAME}` | The name of your Azure storage container where you have uploaded your documents. | `myContainer` | | classes | [] | Array containing all the classes you have in the project. These are the classes you want to classify your documents into.| [] | | documents | [] | Array containing all the documents in your project and what the classes labeled for this document. | [] | | location | `{DOCUMENT-NAME}` | The location of the documents in the storage container. Since all the documents are in the root of the container this should be the document name.|`doc1.txt`| @@ -106,18 +108,19 @@ Use the following JSON in your request. 
Replace the placeholder values below wit ```json { - "api-version": "{API-VERSION}", - "stringIndexType": "Utf16CodeUnit", - "metadata": { - "projectName": "{PROJECT-NAME}", - "projectKind": "customSingleLabelClassification", - "description": "Trying out custom single label text classification", - "language": "{LANGUAGE-CODE}", - "multilingual": true, - "storageInputContainerName": "{CONTAINER-NAME}", - "settings": {} - }, - "assets": { + "projectFileVersion": "{API-VERSION}", + "stringIndexType": "Utf16CodeUnit", + "metadata": { + "projectName": "{PROJECT-NAME}", + "storageInputContainerName": "{CONTAINER-NAME}", + "projectKind": "customSingleLabelClassification", + "description": "Trying out custom multi label text classification", + "language": "{LANGUAGE-CODE}", + "multilingual": true, + "settings": {} + }, + "assets": { + "projectKind": "customSingleLabelClassification", "classes": [ { "category": "Class1" @@ -149,12 +152,12 @@ Use the following JSON in your request. Replace the placeholder values below wit ``` |Key |Placeholder |Value | Example | |---------|---------|----------|--| -| api-version | `{API-VERSION}` | The version of the API you are calling. The version used here must be the same API version in the URL. | `2022-03-01-preview` | +| api-version | `{API-VERSION}` | The version of the API you are calling. The version used here must be the same API version in the URL. | `2022-05-01` | | projectName | `{PROJECT-NAME}` | The name of your project. This value is case-sensitive. | `myProject` | | projectKind | `customSingleLabelClassification` | Your project kind. | `customSingleLabelClassification` | | language | `{LANGUAGE-CODE}` | A string specifying the language code for the documents used in your project. If your project is a multilingual project, choose the language code of the majority of the documents. See [language support](../../language-support.md) to learn more about supported language codes. |`en-us`| | multilingual | `true`| A boolean value that enables you to have documents in multiple languages in your dataset and when your model is deployed you can query the model in any supported language (not necessarily included in your training documents. See [language support](../../language-support.md#multi-lingual-option) to learn more about multilingual support. | `true`| -| storageInputContainerName | {CONTAINER-NAME} | The name of your Azure storage container where you have uploaded your documents. | `myContainer` | +| storageInputContainerName | `{CONTAINER-NAME}` | The name of your Azure storage container where you have uploaded your documents. | `myContainer` | | classes | [] | Array containing all the classes you have in the project. These are the classes you want to classify your documents into.| [] | | documents | [] | Array containing all the documents in your project and which class this document belongs to. | [] | | location | `{DOCUMENT-NAME}` | The location of the documents in the storage container. Since all the documents are in the root of the container this should be the document name.|`doc1.txt`| @@ -162,7 +165,7 @@ Use the following JSON in your request. Replace the placeholder values below wit --- -Once you send your API request, you’ll receive a `202` response indicating that the job was submitted correctly. In the response headers, extract the `location` value. It will be formatted like this: +Once you send your API request, you’ll receive a `202` response indicating that the job was submitted correctly. 
In the response headers, extract the `operation-location` value. It will be formatted like this: ```rest {ENDPOINT}/language/authoring/analyze-text/projects/{PROJECT-NAME}/import/jobs/{JOB-ID}?api-version={API-VERSION} diff --git a/articles/cognitive-services/language-service/custom-text-classification/includes/rest-api/model-evaluation-multi-label.md b/articles/cognitive-services/language-service/custom-text-classification/includes/rest-api/model-evaluation-multi-label.md index d1a15c4adc39..67ab2d2c7554 100644 --- a/articles/cognitive-services/language-service/custom-text-classification/includes/rest-api/model-evaluation-multi-label.md +++ b/articles/cognitive-services/language-service/custom-text-classification/includes/rest-api/model-evaluation-multi-label.md @@ -18,7 +18,7 @@ Submit a **GET** request using the following URL, headers, and JSON body to get ### Request URL ```rest -{ENDPOINT}/language/analyze-text/projects/{PROJECT-NAME}/models/{trainedModelLabel}/evaluation/summary-result?api-version={API-VERSION} +{ENDPOINT}/language/authoring/analyze-text/projects/{PROJECT-NAME}/models/{trainedModelLabel}/evaluation/summary-result?api-version={API-VERSION} ``` |Placeholder |Value | Example | @@ -26,7 +26,7 @@ Submit a **GET** request using the following URL, headers, and JSON body to get |`{ENDPOINT}` | The endpoint for authenticating your API request. | `https://.cognitiveservices.azure.com` | |`{PROJECT-NAME}` | The name for your project. This value is case-sensitive. | `myProject` | |`{trainedModelLabel}` | The name for your trained model. This value is case-sensitive. | `Model1` | -|`{API-VERSION}` | The version of the API you are calling. The value referenced here is for the latest version released. Learn more about other available [API versions](../../../concepts/model-lifecycle.md#choose-the-model-version-used-on-your-data) | `2022-03-01-preview` | +|`{API-VERSION}` | The version of the API you are calling. The value referenced here is for the latest version released. Learn more about other available [API versions](../../../concepts/model-lifecycle.md#choose-the-model-version-used-on-your-data) | `2022-05-01` | ### Headers diff --git a/articles/cognitive-services/language-service/custom-text-classification/includes/rest-api/model-evaluation-single-label.md b/articles/cognitive-services/language-service/custom-text-classification/includes/rest-api/model-evaluation-single-label.md index 31e7ac26266c..fa54e558fd7c 100644 --- a/articles/cognitive-services/language-service/custom-text-classification/includes/rest-api/model-evaluation-single-label.md +++ b/articles/cognitive-services/language-service/custom-text-classification/includes/rest-api/model-evaluation-single-label.md @@ -18,7 +18,7 @@ Submit a **GET** request using the following URL, headers, and JSON body to get ### Request URL ```rest -{ENDPOINT}/language/analyze-text/projects/{PROJECT-NAME}/models/{trainedModelLabel}/evaluation/summary-result?api-version={API-VERSION} +{ENDPOINT}/language/authoring/analyze-text/projects/{PROJECT-NAME}/models/{trainedModelLabel}/evaluation/summary-result?api-version={API-VERSION} ``` |Placeholder |Value | Example | @@ -26,7 +26,7 @@ Submit a **GET** request using the following URL, headers, and JSON body to get |`{ENDPOINT}` | The endpoint for authenticating your API request. | `https://.cognitiveservices.azure.com` | |`{PROJECT-NAME}` | The name for your project. This value is case-sensitive. | `myProject` | |`{trainedModelLabel}` | The name for your trained model. 
This value is case-sensitive. | `Model1` | -|`{API-VERSION}` | The version of the API you are calling. The value referenced here is for the latest version released. Learn more about other available [API versions](../../../concepts/model-lifecycle.md#choose-the-model-version-used-on-your-data) | `2022-03-01-preview` | +|`{API-VERSION}` | The version of the API you are calling. The value referenced here is for the latest version released. Learn more about other available [API versions](../../../concepts/model-lifecycle.md#choose-the-model-version-used-on-your-data) | `2022-05-01` | ### Headers diff --git a/articles/cognitive-services/language-service/custom-text-classification/includes/rest-api/project-details.md b/articles/cognitive-services/language-service/custom-text-classification/includes/rest-api/project-details.md index 718a6e7484cd..175eb4f35c9f 100644 --- a/articles/cognitive-services/language-service/custom-text-classification/includes/rest-api/project-details.md +++ b/articles/cognitive-services/language-service/custom-text-classification/includes/rest-api/project-details.md @@ -22,7 +22,7 @@ To get custom text classification project details, submit a **GET** request usin |---------|---------|---------| |`{ENDPOINT}` | The endpoint for authenticating your API request. | `https://.cognitiveservices.azure.com` | |`{PROJECT-NAME}` | The name for your project. This value is case-sensitive. | `myProject` | -|`{API-VERSION}` | The version of the API you're calling. The value referenced here is for the latest released [model version](../../../concepts/model-lifecycle.md#choose-the-model-version-used-on-your-data). | `2022-03-01-preview` | +|`{API-VERSION}` | The version of the API you're calling. The value referenced here is for the latest released [model version](../../../concepts/model-lifecycle.md#choose-the-model-version-used-on-your-data). | `2022-05-01` | ### Headers @@ -42,12 +42,11 @@ Once you send the request, you will get the following response. "lastTrainedDateTime": "2022-04-23T13:39:09.384Z", "lastDeployedDateTime": "2022-04-23T13:39:09.384Z", "projectKind": "customSingleLabelClassification", - "storageInputContainerName": "string", - "settings": {}, - "projectName": "string", + "storageInputContainerName": "{CONTAINER-NAME}", + "projectName": "{PROJECT-NAME}", "multilingual": true, - "description": "string", - "language": "string" + "description": "Project description", + "language": "{LANGUAGE-CODE}" } ``` @@ -60,4 +59,4 @@ Once you send the request, you will get the following response. | `multilingual` | | A boolean value that enables you to have documents in multiple languages in your dataset. When your model is deployed, you can query the model in any supported language (not necessarily included in your training documents. For more information on multilingual support, see [language support](../../language-support.md#multi-lingual-option). | `true`| | `language` | `{LANGUAGE-CODE}` | A string specifying the language code for the documents used in your project. If your project is a multilingual project, choose the language code of the majority of the documents. See [language support](../../language-support.md) to learn more about supported language codes. |`en-us`| -Once you send your API request, you'll receive a `202` response indicating success and JSON response body with your project details. +Once you send your API request, you'll receive a `200` response indicating success and JSON response body with your project details. 
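If you want to retrieve these project details programmatically, the following is a minimal sketch using Python's `requests` library. The endpoint, key, and project name shown are placeholder values for illustration only; the fields read from the response are the ones documented in the JSON body above.

```python
import requests

# Placeholder values -- replace with your own Language resource and project details.
ENDPOINT = "https://<your-resource-name>.cognitiveservices.azure.com"
KEY = "<your-resource-key>"
PROJECT_NAME = "myProject"
API_VERSION = "2022-05-01"

url = (
    f"{ENDPOINT}/language/authoring/analyze-text/projects/"
    f"{PROJECT_NAME}?api-version={API_VERSION}"
)
headers = {"Ocp-Apim-Subscription-Key": KEY}

# A 200 response carries the project details as a JSON body.
response = requests.get(url, headers=headers)
response.raise_for_status()

details = response.json()
print(details["projectKind"], details["language"], details["storageInputContainerName"])
```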
diff --git a/articles/cognitive-services/language-service/custom-text-classification/includes/rest-api/submit-task.md b/articles/cognitive-services/language-service/custom-text-classification/includes/rest-api/submit-task.md index 3d5d86c1c8dd..8e50d3d056a8 100644 --- a/articles/cognitive-services/language-service/custom-text-classification/includes/rest-api/submit-task.md +++ b/articles/cognitive-services/language-service/custom-text-classification/includes/rest-api/submit-task.md @@ -12,9 +12,14 @@ ms.author: aahi Use this **POST** request to start a text classification task. ```rest -{ENDPOINT}/text/analytics/v3.2-preview.2/analyze +{ENDPOINT}/language/analyze-text/jobs?api-version={API-VERSION} ``` +|Placeholder |Value | Example | +|---------|---------|---------| +|`{ENDPOINT}` | The endpoint for authenticating your API request. | `https://.cognitiveservices.azure.com` | +|`{API-VERSION}` | The version of the API you are calling. The value referenced here is for the latest version released. See [Model lifecycle](../../../concepts/model-lifecycle.md#choose-the-model-version-used-on-your-data) to learn more about other available API versions. | `2022-05-01` | + #### Headers |Key|Value| @@ -27,35 +32,34 @@ Use this **POST** request to start a text classification task. ```json { - "displayName": "{JOB-NAME}", - "analysisInput": { - "documents": [ - { - "id": "{DOC-ID}", - "language": "{LANGUAGE-CODE}", - "text": "{DOC-TEXT}" - }, - { - "id": "{DOC-ID}", - "language": "{LANGUAGE-CODE}", - "text": "{DOC-TEXT}" - } - ] - }, - "tasks": { - "customMultiLabelClassificationTasks": [ - { - "parameters": { - "project-name": "{PROJECT-NAME}", - "deployment-name": "{DEPLOYMENT-NAME}" - } - } - ] + "displayName": "Classifying documents", + "analysisInput": { + "documents": [ + { + "id": "1", + "language": "{LANGUAGE-CODE}", + "text": "Text1" + }, + { + "id": "2", + "language": "{LANGUAGE-CODE}", + "text": "Text2" + } + ] + }, + "tasks": [ + { + "kind": "CustomMultiLabelClassification", + "taskName": "Multi Label Classification", + "parameters": { + "projectName": "{PROJECT-NAME}", + "deploymentName": "{DEPLOYMENT-NAME}" + } } + ] } ``` - |Key |Placeholder |Value | Example | |---------|---------|----------|--| | `displayName` | `{JOB-NAME}` | Your job name. | `MyJobName` | @@ -64,7 +68,7 @@ Use this **POST** request to start a text classification task. | `language` | `{LANGUAGE-CODE}` | A string specifying the language code for the document. If this key isn't specified, the service will assume the default language of the project that was selected during project creation. See [language support](../../language-support.md) for a list of supported language codes. |`en-us`| | `text` | `{DOC-TEXT}` | Document task to run the tasks on. | `Lorem ipsum dolor sit amet` | |`tasks`| | List of tasks we want to perform.|`[]`| -| `customMultiLabelClassificationTasks` | |Task identifier for task we want to perform. | | +| `taskName`|CustomMultiLabelClassification|The task name|CustomMultiLabelClassification| |`parameters`| |List of parameters to pass to the task.| | | `project-name` |`{PROJECT-NAME}` | The name for your project. This value is case-sensitive. | `myProject` | | `deployment-name` |`{DEPLOYMENT-NAME}` | The name of your deployment. This value is case-sensitive. | `prod` | @@ -73,31 +77,31 @@ Use this **POST** request to start a text classification task. 
```json { - "displayName": "{JOB-NAME}", - "analysisInput": { - "documents": [ - { - "id": "{DOC-ID}", - "language": "{LANGUAGE-CODE}", - "text": "{DOC-TEXT}" - }, - { - "id": "{DOC-ID}", - "language": "{LANGUAGE-CODE}", - "text": "{DOC-TEXT}" - } - ] - }, - "tasks": { - "customSingleLabelClassificationTasks": [ - { - "parameters": { - "project-name": "`{PROJECT-NAME}`", - "deployment-name": "`{DEPLOYMENT-NAME}`" - } - } - ] + "displayName": "Classifying documents", + "analysisInput": { + "documents": [ + { + "id": "1", + "language": "{LANGUAGE-CODE}", + "text": "Text1" + }, + { + "id": "2", + "language": "{LANGUAGE-CODE}", + "text": "Text2" + } + ] + }, + "tasks": [ + { + "kind": "CustomSingleLabelClassification", + "taskName": "Single Classification Label", + "parameters": { + "projectName": "{PROJECT-NAME}", + "deploymentName": "{DEPLOYMENT-NAME}" + } } + ] } ``` @@ -108,8 +112,8 @@ Use this **POST** request to start a text classification task. | `id` | `{DOC-ID}` | Document name or ID. | `doc1`| | `language` | `{LANGUAGE-CODE}` | A string specifying the language code for the document. If this key isn't specified, the service will assume the default language of the project that was selected during project creation. See [language support](../../language-support.md) for a list of supported language codes. |`en-us`| | `text` | `{DOC-TEXT}` | Document task to run the tasks on. | `Lorem ipsum dolor sit amet` | -|`tasks`| | List of tasks we want to perform.| | -| `customSingleLabelClassificationTasks` ||Task identifier for task we want to perform. | | +| `taskName`|CustomSingleLabelClassification|The task name|CustomSingleLabelClassification| +|`tasks`|[] | Array of tasks we want to perform.|[] | |`parameters`| |List of parameters to pass to the task.| | | `project-name` |`{PROJECT-NAME}` | The name for your project. This value is case-sensitive. | `myProject` | | `deployment-name` |`{DEPLOYMENT-NAME}` | The name of your deployment. This value is case-sensitive. | `prod` | @@ -121,6 +125,6 @@ Use this **POST** request to start a text classification task. You will receive a 202 response indicating success. In the response **headers**, extract `operation-location`. `operation-location` is formatted like this: - `{YOUR-ENDPOINT}/text/analytics/v3.2-preview.2/analyze/jobs/` +`{ENDPOINT}/language/analyze-text/jobs/{JOB-ID}?api-version={API-VERSION}` You can use this URL to query the task completion status and get the results when task is completed. diff --git a/articles/cognitive-services/language-service/custom-text-classification/includes/rest-api/swap-deployment.md b/articles/cognitive-services/language-service/custom-text-classification/includes/rest-api/swap-deployment.md index e7a188cb1399..958254031ced 100644 --- a/articles/cognitive-services/language-service/custom-text-classification/includes/rest-api/swap-deployment.md +++ b/articles/cognitive-services/language-service/custom-text-classification/includes/rest-api/swap-deployment.md @@ -11,7 +11,6 @@ ms.custom: ignite-fall-2021, event-tier1-build-2022 --- - Create a **POST** request using the following URL, headers, and JSON body to start a swap deployments job. @@ -25,7 +24,7 @@ Create a **POST** request using the following URL, headers, and JSON body to sta |---------|---------|---------| |`{ENDPOINT}` | The endpoint for authenticating your API request. | `https://.cognitiveservices.azure.com` | |`{PROJECT-NAME}` | The name for your project. This value is case-sensitive. 
| `myProject` | -|`{API-VERSION}` | The version of the API you are calling. The value referenced here is for the latest [model version](../../../concepts/model-lifecycle.md#choose-the-model-version-used-on-your-data) released. | `2022-03-01-preview` | +|`{API-VERSION}` | The version of the API you are calling. The value referenced here is for the latest [model version](../../../concepts/model-lifecycle.md#choose-the-model-version-used-on-your-data) released. | `2022-05-01` | ### Headers @@ -35,7 +34,7 @@ Use the following header to authenticate your request. |Key|Value| |--|--| |`Ocp-Apim-Subscription-Key`| The key to your resource. Used for authenticating your API requests.| -|`Content-Type` | application/json | + ### Request Body @@ -47,10 +46,10 @@ Use the following header to authenticate your request. ``` -|Key| Value| Example| -|--|--|--| -|firstDeploymentName | The name for your first deployment. This value is case-sensitive. | `production` | -|secondDeploymentName | The name for your second deployment. This value is case-sensitive. | `staging` | +|Key|Placeholder| Value| Example| +|--|--|--|--| +|firstDeploymentName |`{FIRST-DEPLOYMENT-NAME}`| The name for your first deployment. This value is case-sensitive. | `production` | +|secondDeploymentName | `{SECOND-DEPLOYMENT-NAME}`|The name for your second deployment. This value is case-sensitive. | `staging` | Once you send your API request, you will receive a `202` response indicating success. diff --git a/articles/cognitive-services/language-service/custom-text-classification/includes/rest-api/text-classification-task.md b/articles/cognitive-services/language-service/custom-text-classification/includes/rest-api/text-classification-task.md index 3a3fe636083e..0a51d0a10871 100644 --- a/articles/cognitive-services/language-service/custom-text-classification/includes/rest-api/text-classification-task.md +++ b/articles/cognitive-services/language-service/custom-text-classification/includes/rest-api/text-classification-task.md @@ -13,7 +13,13 @@ ms.author: aahi Use this **POST** request to text classification task. Replace `{projectName}` with the project name where you have the model you want to use. -`{YOUR-ENDPOINT}/text/analytics/v3.2-preview.2/analyze` +`{ENDPOINT}/language/:analyze-text?api-version={API-VERSION}` + +|Placeholder |Value | Example | +|---------|---------|---------| +|`{ENDPOINT}` | The endpoint for authenticating your API request. | `https://.cognitiveservices.azure.com` | +|`{API-VERSION}` | The version of the API you are calling. The value referenced here is for the latest version released. See [Model lifecycle](../../../concepts/model-lifecycle.md#choose-the-model-version-used-on-your-data) to learn more about other available API versions. | `2022-05-01` | + #### Headers @@ -25,30 +31,22 @@ Use this **POST** request to text classification task. Replace `{projectName}` w ```json { - "displayName": "MyJobName", - "analysisInput": { - "documents": [ - { - "id": "doc1", - "text": "Lorem ipsum dolor sit amet, consectetur adipiscing elit. Nunc tempus, felis sed vehicula lobortis, lectus ligula facilisis quam, quis aliquet lectus diam id erat. Vivamus eu semper tellus. Integer placerat sem vel eros iaculis dictum. Sed vel congue urna." - }, - { - "id": "doc2", - "text": "Mauris dui dui, ultricies vel ligula ultricies, elementum viverra odio. Donec tempor odio nunc, quis fermentum lorem egestas commodo. Class aptent taciti sociosqu ad litora torquent per conubia nostra, per inceptos himenaeos." 
- } - ] - }, - "tasks": { - "customMultiClassificationTasks": [ - { - "parameters": { - "project-name": "MyProject", - "deployment-name": "MyDeploymentName" - "stringIndexType": "TextElements_v8" - } - } - ] - } + "kind": "customMultiClassificationTasks", + "parameters": { + "modelVersion": "{CONFIG-VERSION}" + }, + "analysisInput": { + "documents": [ + { + "id": "doc1", + "text": "Lorem ipsum dolor sit amet, consectetur adipiscing elit. Nunc tempus, felis sed vehicula lobortis, lectus ligula facilisis quam, quis aliquet lectus diam id erat. Vivamus eu semper tellus. Integer placerat sem vel eros iaculis dictum. Sed vel congue urna." + }, + { + "id": "doc2", + "text": "Mauris dui dui, ultricies vel ligula ultricies, elementum viverra odio. Donec tempor odio nunc, quis fermentum lorem egestas commodo. Class aptent taciti sociosqu ad litora torquent per conubia nostra, per inceptos himenaeos." + } + ] + } } ``` @@ -68,9 +66,9 @@ Replace the text of the document with movie summaries to classify. #### Response -You will receive a 202 response indicating success. In the response **headers**, extract `operation-location`. +You will receive a 200 response indicating success. In the response **headers**, extract `operation-location`. `operation-location` is formatted like this: - `{YOUR-ENDPOINT}/text/analytics/v3.2-preview.2/analyze/jobs/` + `{ENDPOINT}/text/analytics/v3.2-preview.2/analyze/jobs/` You will use this endpoint to get the custom text classification task results. diff --git a/articles/cognitive-services/language-service/custom-text-classification/includes/rest-api/train-model.md b/articles/cognitive-services/language-service/custom-text-classification/includes/rest-api/train-model.md index 9744eae72b86..1c221236baef 100644 --- a/articles/cognitive-services/language-service/custom-text-classification/includes/rest-api/train-model.md +++ b/articles/cognitive-services/language-service/custom-text-classification/includes/rest-api/train-model.md @@ -19,7 +19,7 @@ Submit a **POST** request using the following URL, headers, and JSON body to sub |---------|---------|---------| | `{ENDPOINT}` | The endpoint for authenticating your API request. | `https://.cognitiveservices.azure.com` | | `{PROJECT-NAME}` | The name of your project. This value is case-sensitive. | `myProject` | -|`{API-VERSION}` | The version of the API you are calling. The value referenced here is for the latest version released. Learn more about other available [API versions](../../../concepts/model-lifecycle.md#choose-the-model-version-used-on-your-data) | `2022-03-01-preview` | +|`{API-VERSION}` | The version of the API you are calling. The value referenced here is for the latest version released. Learn more about other available [API versions](../../../concepts/model-lifecycle.md#choose-the-model-version-used-on-your-data) | `2022-05-01` | #### Headers @@ -48,7 +48,7 @@ Use the following JSON in your request body. The model will be given the `{MODEL |Key |Placeholder |Value | Example | |---------|---------|-----|----| | modelLabel | `{MODEL-NAME}` | The model name that will be assigned to your model once trained successfully. | `myModel` | -| trainingConfigVersion | `{CONFIG-VERSION}` | This is the [model version](../../../concepts/model-lifecycle.md) that will be used to train the model. | `2022-05-01` | +| trainingConfigVersion | `{CONFIG-VERSION}` | This is the [model version](../../../concepts/model-lifecycle.md) that will be used to train the model. 
| `2022-05-01`| | evaluationOptions | | Option to split your data across training and testing sets. | `{}` | | kind | `percentage` | Split methods. Possible values are `percentage` or `manual`. See [How to train a model](../../how-to/train-model.md#data-splitting) for more information. |`percentage`| | trainingSplitPercentage | `80`| Percentage of your tagged data to be included in the training set. Recommended value is `80`. | `80`| diff --git a/articles/cognitive-services/language-service/custom-text-classification/media/multiple.png b/articles/cognitive-services/language-service/custom-text-classification/media/multiple.png index d473ac7f9dd2..ece8f39c728a 100644 Binary files a/articles/cognitive-services/language-service/custom-text-classification/media/multiple.png and b/articles/cognitive-services/language-service/custom-text-classification/media/multiple.png differ diff --git a/articles/cognitive-services/language-service/custom-text-classification/media/review-validation-set.png b/articles/cognitive-services/language-service/custom-text-classification/media/review-validation-set.png index ef3396e5e8db..ebb615bf433a 100644 Binary files a/articles/cognitive-services/language-service/custom-text-classification/media/review-validation-set.png and b/articles/cognitive-services/language-service/custom-text-classification/media/review-validation-set.png differ diff --git a/articles/cognitive-services/language-service/custom-text-classification/media/single.png b/articles/cognitive-services/language-service/custom-text-classification/media/single.png index 25638db4aae9..ee77d4a32006 100644 Binary files a/articles/cognitive-services/language-service/custom-text-classification/media/single.png and b/articles/cognitive-services/language-service/custom-text-classification/media/single.png differ diff --git a/articles/cognitive-services/language-service/custom-text-classification/media/tag-1.png b/articles/cognitive-services/language-service/custom-text-classification/media/tag-1.png deleted file mode 100644 index c5e822aa4cbf..000000000000 Binary files a/articles/cognitive-services/language-service/custom-text-classification/media/tag-1.png and /dev/null differ diff --git a/articles/cognitive-services/language-service/custom-text-classification/overview.md b/articles/cognitive-services/language-service/custom-text-classification/overview.md index 64d0dfeba09a..80bc5a148e02 100644 --- a/articles/cognitive-services/language-service/custom-text-classification/overview.md +++ b/articles/cognitive-services/language-service/custom-text-classification/overview.md @@ -79,7 +79,7 @@ As you use custom text classification, see the following reference documentation ## Responsible AI -An AI system includes not only the technology, but also the people who will use it, the people who will be affected by it, and the environment in which it is deployed. Read the [transparency note for custom text classification]() to learn about responsible AI use and deployment in your systems. You can also see the following articles for more information: +An AI system includes not only the technology, but also the people who will use it, the people who will be affected by it, and the environment in which it is deployed. Read the [transparency note for custom text classification](/legal/cognitive-services/language-service/ctc-transparency-note?context=/azure/cognitive-services/language-service/context/context) to learn about responsible AI use and deployment in your systems. 
You can also see the following articles for more information: [!INCLUDE [Responsible AI links](../includes/overview-responsible-ai-links.md)] diff --git a/articles/cognitive-services/language-service/orchestration-workflow/how-to/build-schema.md b/articles/cognitive-services/language-service/orchestration-workflow/how-to/build-schema.md index ff98945f077d..979f5c1e9c49 100644 --- a/articles/cognitive-services/language-service/orchestration-workflow/how-to/build-schema.md +++ b/articles/cognitive-services/language-service/orchestration-workflow/how-to/build-schema.md @@ -34,7 +34,7 @@ Consider the following guidelines and recommendations for your project: To build a project schema within [Language Studio](https://aka.ms/languageStudio): -1. Select **Build schema** from the left side menu. +1. Select **Schema definition** from the left side menu. 2. To create an intent, select **Add** from the top menu. You will be prompted to type in a name for the intent. diff --git a/articles/cognitive-services/language-service/orchestration-workflow/includes/language-studio/delete-model.md b/articles/cognitive-services/language-service/orchestration-workflow/includes/language-studio/delete-model.md index 408eccfebc39..0fa49a96d85e 100644 --- a/articles/cognitive-services/language-service/orchestration-workflow/includes/language-studio/delete-model.md +++ b/articles/cognitive-services/language-service/orchestration-workflow/includes/language-studio/delete-model.md @@ -12,7 +12,7 @@ ms.author: aahi To delete your model from within the [Language Studio](https://aka.ms/LanguageStudio): -1. Select **View model details** from the left side menu. +1. Select **Model performance** from the left side menu. 2. Click on the **model name** you want to delete and click **Delete** from the top menu. diff --git a/articles/cognitive-services/language-service/orchestration-workflow/includes/language-studio/deploy-model.md b/articles/cognitive-services/language-service/orchestration-workflow/includes/language-studio/deploy-model.md index 6be65efceeed..4f1f2f89e029 100644 --- a/articles/cognitive-services/language-service/orchestration-workflow/includes/language-studio/deploy-model.md +++ b/articles/cognitive-services/language-service/orchestration-workflow/includes/language-studio/deploy-model.md @@ -11,9 +11,9 @@ ms.author: aahi To deploy your model from within the [Language Studio](https://aka.ms/LanguageStudio): -1. Select **Deploy model** from the left side menu. +1. Select **Deploying a model** from the left side menu. -2. Click on **Start deployment job** to start a new deployment job. +2. Click on **Add deployment** to start a new deployment job. :::image type="content" source="../../media/add-deployment-model.png" alt-text="A screenshot showing the model deployment button in Language Studio." lightbox="../../media/add-deployment-model.png"::: diff --git a/articles/cognitive-services/language-service/orchestration-workflow/includes/language-studio/model-performance.md b/articles/cognitive-services/language-service/orchestration-workflow/includes/language-studio/model-performance.md index c8b568bbe68b..6b4d898a10ff 100644 --- a/articles/cognitive-services/language-service/orchestration-workflow/includes/language-studio/model-performance.md +++ b/articles/cognitive-services/language-service/orchestration-workflow/includes/language-studio/model-performance.md @@ -11,7 +11,7 @@ ms.author: aahi 1. Go to your project page in [Language Studio](https://aka.ms/languageStudio). -2. 
Select **View model details** from the menu on the left side of the screen. +2. Select **Model performance** from the menu on the left side of the screen. 3. In this page you can only view the successfully trained models, F1 score for each model and [model expiration date](../../../concepts/model-lifecycle.md#expiration-timeline). You can click on the model name for more details about its performance. @@ -25,4 +25,4 @@ You can click on the model name for more details about its performance. > [!NOTE] > If you don't see any of the intents you have in your model displayed here, it is because they weren't in any of the utterances that were used for the test set. - \ No newline at end of file + diff --git a/articles/cognitive-services/language-service/orchestration-workflow/includes/rest-api/cancel-training.md b/articles/cognitive-services/language-service/orchestration-workflow/includes/rest-api/cancel-training.md index 9d8902a768a6..8b011763f86e 100644 --- a/articles/cognitive-services/language-service/orchestration-workflow/includes/rest-api/cancel-training.md +++ b/articles/cognitive-services/language-service/orchestration-workflow/includes/rest-api/cancel-training.md @@ -16,15 +16,15 @@ Create a **POST** request using the following URL, headers, and JSON body to can Use the following URL when creating your API request. Replace the placeholder values below with your own values. ```rest -{ENDPOINT}/language/analyze-conversations/projects/{PROJECT-NAME}/train/jobs/{JOB-ID}/:cancel?api-version={API-VERSION} +{ENDPOINT}/language/authoring/analyze-conversations/projects/{PROJECT-NAME}/train/jobs/{JOB-ID}/:cancel?api-version={API-VERSION} ``` |Placeholder |Value | Example | |---------|---------|---------| |`{ENDPOINT}` | The endpoint for authenticating your API request. | `https://.cognitiveservices.azure.com` | |`{PROJECT-NAME}` | The name for your project. This value is case-sensitive. | `EmailApp` | -|`{JOB-ID} | This is the training job ID| |`XXXXX-XXXXX-XXXX-XX| -|`{API-VERSION}` | The version of the API you are calling. The value referenced here is for the latest [released model version](../../../concepts/model-lifecycle.md#choose-the-model-version-used-on-your-data). | `2022-03-01-preview` | +|`{JOB-ID}` | This is the training job ID |`XXXXX-XXXXX-XXXX-XX`| +|`{API-VERSION}` | The version of the API you are calling. The value referenced here is for the latest version released. See [Model lifecycle](../../../concepts/model-lifecycle.md#choose-the-model-version-used-on-your-data) to learn more about other available API versions | `2022-05-01` | ### Headers @@ -34,5 +34,6 @@ Use the following header to authenticate your request. |--|--| |`Ocp-Apim-Subscription-Key`| The key to your resource. Used for authenticating your API requests.| -Once you send your API request, you will receive a 204 response indicating success, which means your training job has been canceled. +Once you send your API request, you will receive a 202 response indicating success, which means your training job has been canceled. A successful call results with an Operation-Location header used to check the status of the job. 
+ diff --git a/articles/cognitive-services/language-service/orchestration-workflow/includes/rest-api/create-project.md b/articles/cognitive-services/language-service/orchestration-workflow/includes/rest-api/create-project.md index df9daa738319..113cd2e90017 100644 --- a/articles/cognitive-services/language-service/orchestration-workflow/includes/rest-api/create-project.md +++ b/articles/cognitive-services/language-service/orchestration-workflow/includes/rest-api/create-project.md @@ -23,7 +23,7 @@ Use the following URL when creating your API request. Replace the placeholder va |---------|---------|---------| |`{ENDPOINT}` | The endpoint for authenticating your API request. | `https://.cognitiveservices.azure.com` | |`{PROJECT-NAME}` | The name for your project. This value is case-sensitive. | `myProject` | -|`{API-VERSION}` | The version of the API you are calling. The value referenced here is for the latest released [model version](../../../concepts/model-lifecycle.md#choose-the-model-version-used-on-your-data). | `2022-03-01-preview` | +|`{API-VERSION}` | The version of the API you are calling. The value referenced here is for the latest version released. See [Model lifecycle](../../../concepts/model-lifecycle.md#choose-the-model-version-used-on-your-data) to learn more about other available API versions. | `2022-05-01` | ### Headers @@ -39,20 +39,15 @@ Use the following sample JSON as your body. ```json { - "projectKind": "orchestration", - "settings": { - "confidenceThreshold": 0 - }, "projectName": "{PROJECT-NAME}", - "multilingual": true, - "description": "Project description", - "language": "{LANGUAGE-CODE}" -} + "language": "{LANGUAGE-CODE}", + "projectKind": "Orchestration", + "description": "Project description" + } ``` |Key |Placeholder |Value | Example | |---------|---------|----------|--| | `projectName` | `{PROJECT-NAME}` | The name of your project. This value is case-sensitive. | `EmailApp` | | `language` | `{LANGUAGE-CODE}` | A string specifying the language code for the utterances used in your project. If your project is a multilingual project, choose the [language code](../../language-support.md) of the majority of the utterances. |`en-us`| -| `multilingual` | `true`| A boolean value that enables you to have documents in multiple languages in your dataset and when your model is deployed you can query the model in any supported language, not just ones included in your training documents. See [language support](../../language-support.md#multilingual-options). | `true`| diff --git a/articles/cognitive-services/language-service/orchestration-workflow/includes/rest-api/delete-deployment.md b/articles/cognitive-services/language-service/orchestration-workflow/includes/rest-api/delete-deployment.md index ce42deecb6f1..4617da5da38d 100644 --- a/articles/cognitive-services/language-service/orchestration-workflow/includes/rest-api/delete-deployment.md +++ b/articles/cognitive-services/language-service/orchestration-workflow/includes/rest-api/delete-deployment.md @@ -11,14 +11,13 @@ ms.custom: ignite-fall-2021 --- - -Create a **DELETE** request using the following URL, headers, and JSON body to delete an orchestration workflow deployment. +Create a **DELETE** request using the following URL, headers, and JSON body to delete a conversational language understanding deployment. 
### Request URL ```rest -{ENDPOINT}/language/analyze-conversations/projects/{projectName}/deployments/{deploymentName}?api-version={API-VERSION} +{ENDPOINT}/language/authoring/analyze-conversations/projects/{projectName}/deployments/{deploymentName}?api-version={API-VERSION} ``` |Placeholder |Value | Example | @@ -26,7 +25,7 @@ Create a **DELETE** request using the following URL, headers, and JSON body to d |`{ENDPOINT}` | The endpoint for authenticating your API request. | `https://.cognitiveservices.azure.com` | |`{PROJECT-NAME}` | The name for your project. This value is case-sensitive. | `myProject` | |`{DEPLOYMENT-NAME}` | The name for your deployment name. This value is case-sensitive. | `staging` | -|`{API-VERSION}` | The version of the API you are calling. The value referenced here is for the latest released [model version](../../../concepts/model-lifecycle.md#choose-the-model-version-used-on-your-data). | `2022-03-01-preview` | +|`{API-VERSION}` | The version of the API you are calling. The value referenced here is for the latest released [model version](../../../concepts/model-lifecycle.md#choose-the-model-version-used-on-your-data). | `2022-05-01` | ### Headers @@ -35,7 +34,6 @@ Use the following header to authenticate your request. |Key|Value| |--|--| |`Ocp-Apim-Subscription-Key`| The key to your resource. Used for authenticating your API requests.| -|`Content-Type` | application/json | Once you send your API request, you will receive a `202` response indicating success, which means your deployment has been deleted. diff --git a/articles/cognitive-services/language-service/orchestration-workflow/includes/rest-api/delete-model.md b/articles/cognitive-services/language-service/orchestration-workflow/includes/rest-api/delete-model.md index f879b09da2f0..46a3bce2632b 100644 --- a/articles/cognitive-services/language-service/orchestration-workflow/includes/rest-api/delete-model.md +++ b/articles/cognitive-services/language-service/orchestration-workflow/includes/rest-api/delete-model.md @@ -18,7 +18,7 @@ Create a **DELETE** request using the following URL, headers, and JSON body to d ### Request URL ```rest -{ENDPOINT}/language/analyze-conversations/projects/{projectName}/models/{trainedModelLabel}?api-version={API-VERSION} +{ENDPOINT}/language/authoring/analyze-conversations/projects/{projectName}/models/{trainedModelLabel}?api-version={API-VERSION} ``` |Placeholder |Value | Example | @@ -26,7 +26,7 @@ Create a **DELETE** request using the following URL, headers, and JSON body to d |`{YOUR-ENDPOINT}` | The endpoint for authenticating your API request. | `https://.cognitiveservices.azure.com` | |`{PROJECT-NAME}` | The name for your project. This value is case-sensitive. | `myProject` | |`{trainedModelLabel}` | The name for your model name. This value is case-sensitive. | `model1` | -|`{API-VERSION}` | The version of the API you are calling. The value referenced here is for the latest released [model version](../../../concepts/model-lifecycle.md#choose-the-model-version-used-on-your-data) released. | `2022-03-01-preview` | +|`{API-VERSION}` | The version of the API you are calling. The value referenced here is for the latest released [model version](../../../concepts/model-lifecycle.md#choose-the-model-version-used-on-your-data). | `2022-05-01` | ### Headers @@ -37,4 +37,4 @@ Use the following header to authenticate your request. |`Ocp-Apim-Subscription-Key`| The key to your resource. 
Used for authenticating your API requests.| -Once you send your API request, you will receive a `202` response indicating success, which means your model has been deleted. +Once you send your API request, you will receive a `204` response indicating success, which means your model has been deleted. diff --git a/articles/cognitive-services/language-service/orchestration-workflow/includes/rest-api/delete-project.md b/articles/cognitive-services/language-service/orchestration-workflow/includes/rest-api/delete-project.md index debbb7655cfa..f27ebcad1eac 100644 --- a/articles/cognitive-services/language-service/orchestration-workflow/includes/rest-api/delete-project.md +++ b/articles/cognitive-services/language-service/orchestration-workflow/includes/rest-api/delete-project.md @@ -9,20 +9,20 @@ ms.date: 05/17/2022 ms.author: aahi --- -Create a **DELETE** request using the following URL, headers, and JSON body to delete an orchestration workflow project. +Create a **DELETE** request using the following URL, headers, and JSON body to delete a conversational language understanding project. ### Request URL ```rest -{ENDPOINT}/language/analyze-conversations/projects/{PROJECT-NAME}?api-version={API-VERSION} +{ENDPOINT}/language/authoring/analyze-conversations/projects/{PROJECT-NAME}?api-version={API-VERSION} ``` |Placeholder |Value | Example | |---------|---------|---------| |`{ENDPOINT}` | The endpoint for authenticating your API request. | `https://.cognitiveservices.azure.com` | |`{PROJECT-NAME}` | The name for your project. This value is case-sensitive. | `myProject` | -|`{API-VERSION}` | The version of the API you are calling. The value referenced here is for the latest released [model version](../../../concepts/model-lifecycle.md#choose-the-model-version-used-on-your-data). | `2022-03-01-preview` | +|`{API-VERSION}` | The version of the API you are calling. The value referenced here is for the latest released [model version](../../../concepts/model-lifecycle.md#choose-the-model-version-used-on-your-data). | `2022-05-01` | ### Headers @@ -31,7 +31,5 @@ Use the following header to authenticate your request. |Key|Value| |--|--| |`Ocp-Apim-Subscription-Key`| The key to your resource. Used for authenticating your API requests.| -|`Content-Type` | application/json | - Once you send your API request, you will receive a `202` response indicating success, which means your project has been deleted. diff --git a/articles/cognitive-services/language-service/orchestration-workflow/includes/rest-api/deploy-model.md b/articles/cognitive-services/language-service/orchestration-workflow/includes/rest-api/deploy-model.md index 1ce4d3e135f5..6940f8c67818 100644 --- a/articles/cognitive-services/language-service/orchestration-workflow/includes/rest-api/deploy-model.md +++ b/articles/cognitive-services/language-service/orchestration-workflow/includes/rest-api/deploy-model.md @@ -9,15 +9,12 @@ ms.date: 05/17/2022 ms.author: aahi --- - - Create a **PUT** request using the following URL, headers, and JSON body to start deploying an orchestration workflow model. 
- -### Request URL +#### Request URL ```rest -{ENDPOINT}/language/analyze-conversations/projects/{PROJECT-NAME}/deployments/{DEPLOYMENT-NAME}?api-version={API-VERSION} +{ENDPOINT}/language/authoring/analyze-conversations/projects/{PROJECT-NAME}/deployments/{DEPLOYMENT-NAME}?api-version={API-VERSION} ``` |Placeholder |Value | Example | @@ -25,36 +22,33 @@ Create a **PUT** request using the following URL, headers, and JSON body to star |`{ENDPOINT}` | The endpoint for authenticating your API request. | `https://.cognitiveservices.azure.com` | |`{PROJECT-NAME}` | The name for your project. This value is case-sensitive. | `myProject` | |`{DEPLOYMENT-NAME}` | The name for your deployment. This value is case-sensitive. | `staging` | -|`{API-VERSION}` | The version of the API you are calling. The value referenced here is for the latest released [model version](../../../concepts/model-lifecycle.md#choose-the-model-version-used-on-your-data) released. | `2022-03-01-preview` | +|`{API-VERSION}` | The version of the API you are calling. The value referenced here is for the latest released [model version](../../../concepts/model-lifecycle.md#choose-the-model-version-used-on-your-data). | `2022-05-01` | -### Headers +#### Headers Use the following header to authenticate your request. |Key|Value| |--|--| |`Ocp-Apim-Subscription-Key`| The key to your resource. Used for authenticating your API requests.| -|`Content-Type` | application/json | -### Request Body +#### Request Body ```json { - "trainedModelLabel":"{MODEL-LABEL}" + "trainedModelLabel": "{MODEL-NAME}", } ``` +|Key |Placeholder |Value | Example | +|---------|---------|-----|----| +| trainedModelLabel | `{MODEL-NAME}` | The model name that will be assigned to your deployment. You can only assign successfully trained models. This value is case-sensitive. | `myModel` | -|Key| value| Example| -|--|--|--| -|`trainedModelLabel` | The name for your trained model. This value is case-sensitive. | `Model1` | - - -Once you send your API request, you will receive a `202` response indicating success. In the response headers, extract the `location` value. It will be formatted like this: +Once you send your API request, you will receive a `202` response indicating success. In the response headers, extract the `operation-location` value. It will be formatted like this: ```rest -{ENDPOINT}/language/analyze-conversations/projects/{PROJECT-NAME}/deployments/{DEPLOYMENT-NAME}/jobs/{JOB-ID}?api-version={API-VERSION} +{ENDPOINT}/language/authoring/analyze-conversations/projects/{PROJECT-NAME}/deployments/{DEPLOYMENT-NAME}/jobs/{JOB-ID}?api-version={API-VERSION} ``` -`JOB-ID` is used to identify your request, since this operation is asynchronous. You will use this URL in the next step to get the training status. +You can use this URL to get the deployment job status. diff --git a/articles/cognitive-services/language-service/orchestration-workflow/includes/rest-api/export-project.md b/articles/cognitive-services/language-service/orchestration-workflow/includes/rest-api/export-project.md index 701e24aa2248..27997bdae177 100644 --- a/articles/cognitive-services/language-service/orchestration-workflow/includes/rest-api/export-project.md +++ b/articles/cognitive-services/language-service/orchestration-workflow/includes/rest-api/export-project.md @@ -16,14 +16,14 @@ Create a **POST** request using the following URL, headers, and JSON body to exp Use the following URL when creating your API request. Replace the placeholder values below with your own values. 
```rest -{ENDPOINT}/language/authoring/analyze-text/projects/{PROJECT-NAME}/:export?api-version={API-VERSION} +{ENDPOINT}/language/authoring/analyze-conversations/projects/{PROJECT-NAME}/:export?stringIndexType=Utf16CodeUnit&api-version={API-VERSION} ``` |Placeholder |Value | Example | |---------|---------|---------| |`{ENDPOINT}` | The endpoint for authenticating your API request. | `https://.cognitiveservices.azure.com` | |`{PROJECT-NAME}` | The name for your project. This value is case-sensitive. | `EmailApp` | -|`{API-VERSION}` | The version of the API you are calling. The value referenced here is for the latest released [model version](../../../concepts/model-lifecycle.md#choose-the-model-version-used-on-your-data). | `2022-03-01-preview` | +|`{API-VERSION}` | The version of the API you are calling. The value referenced here is for the latest released [model version](../../../concepts/model-lifecycle.md#choose-the-model-version-used-on-your-data). | `2022-05-01` | ### Headers @@ -32,15 +32,13 @@ Use the following header to authenticate your request. |Key|Value| |--|--| |`Ocp-Apim-Subscription-Key`| The key to your resource. Used for authenticating your API requests.| -| `format` | `clu` | Once you send your API request, you will receive a `202` response indicating success. In the response headers, extract the `operation-location` value. It will be formatted like this: ```rest -{ENDPOINT}/language/analyze-conversations/projects/{PROJECT-NAME}/jobs/{JOB-ID}?api-version={API-VERSION} +{ENDPOINT}/language/authoring/analyze-conversations/projects/{PROJECT-NAME}/jobs/{JOB-ID}?api-version={API-VERSION} ``` `JOB-ID` is used to identify your request, since this operation is asynchronous. Use this URL to get the exported project JSON, using the same authentication method. - diff --git a/articles/cognitive-services/language-service/orchestration-workflow/includes/rest-api/get-deployment-status.md b/articles/cognitive-services/language-service/orchestration-workflow/includes/rest-api/get-deployment-status.md index 273fb5568297..fac7b170d6b9 100644 --- a/articles/cognitive-services/language-service/orchestration-workflow/includes/rest-api/get-deployment-status.md +++ b/articles/cognitive-services/language-service/orchestration-workflow/includes/rest-api/get-deployment-status.md @@ -10,13 +10,12 @@ ms.author: aahi ms.custom: ignite-fall-2021 --- -Use the following **GET** request to query the status of your model's deployment process. You can use the URL you received from the previous step, or replace the placeholder values below with your own values. - +Use the following **GET** request to get the status of your deployment job. Replace the placeholder values below with your own values. ### Request URL ```rest -{ENDPOINT}/language/analyze-conversations/projects/{PROJECT-NAME}/deployments/{DEPLOYMENT-NAME}/jobs/{JOB-ID}?api-version={API-VERSION} +{ENDPOINT}/language/authoring/analyze-conversations/projects/{PROJECT-NAME}/deployments/{DEPLOYMENT-NAME}/jobs/{JOB-ID}?api-version={API-VERSION} ``` |Placeholder |Value | Example | @@ -24,8 +23,8 @@ Use the following **GET** request to query the status of your model's deployment |`{ENDPOINT}` | The endpoint for authenticating your API request. | `https://.cognitiveservices.azure.com` | |`{PROJECT-NAME}` | The name for your project. This value is case-sensitive. | `myProject` | |`{DEPLOYMENT-NAME}` | The name for your deployment. This value is case-sensitive. | `staging` | -|`{JOB-ID}` | The ID for locating your model's training status. 
This is in the `location` header value you received in the previous step. | `xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxxx` | -|`{API-VERSION}` | The version of the API you are calling. The value referenced here is for the latest released [model version](../../../concepts/model-lifecycle.md#choose-the-model-version-used-on-your-data). | `2022-03-01-preview` | +|`{JOB-ID}` | The ID for locating your model's training status. This is in the `location` header value you received from the API in response to your model deployment request. | `xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxxx` | +|`{API-VERSION}` | The version of the API you are calling. The value referenced here is for the latest released [model version](../../../concepts/model-lifecycle.md#choose-the-model-version-used-on-your-data). | `2022-05-01` | ### Headers diff --git a/articles/cognitive-services/language-service/orchestration-workflow/includes/rest-api/get-export-status.md b/articles/cognitive-services/language-service/orchestration-workflow/includes/rest-api/get-export-status.md index ff2418e8bba4..813549f4923c 100644 --- a/articles/cognitive-services/language-service/orchestration-workflow/includes/rest-api/get-export-status.md +++ b/articles/cognitive-services/language-service/orchestration-workflow/includes/rest-api/get-export-status.md @@ -8,11 +8,10 @@ ms.topic: include ms.date: 05/19/2022 ms.author: aahi --- - Use the following **GET** request to query the status of your export job. You can use the URL you received from the previous step, or replace the placeholder values below with your own values. ```rest -{ENDPOINT}/language/analyze-conversations/projects/{PROJECT-NAME}/export/jobs/{JOB-ID}?api-version={API-VERSION} +{ENDPOINT}/language/authoring/analyze-conversations/projects/{PROJECT-NAME}/export/jobs/{JOB-ID}?api-version={API-VERSION} ``` |Placeholder |Value | Example | @@ -20,7 +19,7 @@ Use the following **GET** request to query the status of your export job. You ca |`{ENDPOINT}` | The endpoint for authenticating your API request. | `https://.cognitiveservices.azure.com` | |`{PROJECT-NAME}` | The name for your project. This value is case-sensitive. | `myProject` | |`{JOB-ID}` | The ID for locating your export job status. This is in the `location` header value you received in the previous step. | `xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxxx` | -|`{API-VERSION}` | The version of the API you are calling. The value referenced here is for the latest released [model version](../../../concepts/model-lifecycle.md#choose-the-model-version-used-on-your-data). | `2022-03-01-preview` | +|`{API-VERSION}` | The version of the API you are calling. The value referenced here is for the latest released [model version](../../../concepts/model-lifecycle.md#choose-the-model-version-used-on-your-data). | `2022-05-01` | #### Headers @@ -34,18 +33,12 @@ Use the following header to authenticate your request. 
```json { - "resultUrl": "{RESULT-URL}", - "jobId": "string", - "createdDateTime": "2021-10-19T23:24:41.572Z", - "lastUpdatedDateTime": "2021-10-19T23:24:41.572Z", - "expirationDateTime": "2021-10-19T23:24:41.572Z", - "status": "unknown", - "errors": [ - { - "code": "unknown", - "message": "string" - } - ] + "resultUrl": "{Endpoint}/language/authoring/analyze-conversations/projects/{PROJECT-NAME}/export/jobs/xxxxxx-xxxxx-xxxxx-xx/result?api-version={API-VERSION}", + "jobId": "xxxx-xxxxx-xxxxx-xxx", + "createdDateTime": "2022-04-18T15:23:07Z", + "lastUpdatedDateTime": "2022-04-18T15:23:08Z", + "expirationDateTime": "2022-04-25T15:23:07Z", + "status": "succeeded" } ``` diff --git a/articles/cognitive-services/language-service/orchestration-workflow/includes/rest-api/get-import-status.md b/articles/cognitive-services/language-service/orchestration-workflow/includes/rest-api/get-import-status.md index 8ad93c8ca8a5..ec4f53214387 100644 --- a/articles/cognitive-services/language-service/orchestration-workflow/includes/rest-api/get-import-status.md +++ b/articles/cognitive-services/language-service/orchestration-workflow/includes/rest-api/get-import-status.md @@ -8,11 +8,10 @@ ms.topic: include ms.date: 05/19/2022 ms.author: aahi --- - Use the following **GET** request to query the status of your import job. You can use the URL you received from the previous step, or replace the placeholder values below with your own values. ```rest -{ENDPOINT}/language/analyze-conversations/projects/{PROJECT-NAME}/export/jobs/{JOB-ID}?api-version={API-VERSION} +{ENDPOINT}/language/authoring/analyze-conversations/projects/{PROJECT-NAME}/import/jobs/{JOB-ID}?api-version={API-VERSION} ``` |Placeholder |Value | Example | @@ -20,7 +19,7 @@ Use the following **GET** request to query the status of your import job. You ca |`{ENDPOINT}` | The endpoint for authenticating your API request. | `https://.cognitiveservices.azure.com` | |`{PROJECT-NAME}` | The name for your project. This value is case-sensitive. | `myProject` | |`{JOB-ID}` | The ID for locating your export job status. This is in the `location` header value you received in the previous step. | `xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxxx` | -|`{API-VERSION}` | The version of the API you are calling. The value referenced here is for the latest released [model version](../../../concepts/model-lifecycle.md#choose-the-model-version-used-on-your-data) released. | `2022-03-01-preview` | +|`{API-VERSION}` | The version of the API you are calling. The value referenced here is for the latest released [model version](../../../concepts/model-lifecycle.md#choose-the-model-version-used-on-your-data). | `2022-05-01` | #### Headers @@ -37,31 +36,11 @@ Once you send the request, you will get the following response. 
Keep polling thi ```json { - "jobId": "string", - "createdDateTime": "2022-04-25T10:54:07.950Z", - "lastUpdatedDateTime": "2022-04-25T10:54:07.950Z", - "expirationDateTime": "2022-04-25T10:54:07.950Z", - "status": "unknown", - "warnings": [ - { - "code": "InvalidRequest", - "message": "string", - "target": "string", - "details": [ - "string" - ] - } - ], - "errors": [ - { - "code": "InvalidRequest", - "message": "string", - "target": "string", - "details": [ - "string" - ] - } - ] + "jobId": "xxxxx-xxxxx-xxxx-xxxxx", + "createdDateTime": "2022-04-18T15:17:20Z", + "lastUpdatedDateTime": "2022-04-18T15:17:22Z", + "expirationDateTime": "2022-04-25T15:17:20Z", + "status": "succeeded" } ``` diff --git a/articles/cognitive-services/language-service/orchestration-workflow/includes/rest-api/get-project-details.md b/articles/cognitive-services/language-service/orchestration-workflow/includes/rest-api/get-project-details.md index 56f1f6e55470..15896c6dcff7 100644 --- a/articles/cognitive-services/language-service/orchestration-workflow/includes/rest-api/get-project-details.md +++ b/articles/cognitive-services/language-service/orchestration-workflow/includes/rest-api/get-project-details.md @@ -8,18 +8,17 @@ ms.topic: include ms.date: 05/19/2022 ms.author: aahi --- - Use the following **GET** request to get your project details. You can use the URL you received from the previous step, or replace the placeholder values below with your own values. ```rest -{ENDPOINT}/language/analyze-conversations/projects/{PROJECT-NAME}?api-version=2021-11-01-preview +{ENDPOINT}/language/authoring/analyze-conversations/projects/{PROJECT-NAME}?api-version={API-VERSION} ``` |Placeholder |Value | Example | |---------|---------|---------| |`{ENDPOINT}` | The endpoint for authenticating your API request. | `https://.cognitiveservices.azure.com` | |`{PROJECT-NAME}` | The name for your project. This value is case-sensitive. | `myProject` | -|`{API-VERSION}` | The version of the API you are calling. The value referenced here is for the latest released [model version](../../../concepts/model-lifecycle.md#choose-the-model-version-used-on-your-data). | `2022-03-01-preview` | +|`{API-VERSION}` | The version of the API you are calling. The value referenced here is for the latest released [model version](../../../concepts/model-lifecycle.md#choose-the-model-version-used-on-your-data). | `2022-05-01` | #### Headers @@ -32,16 +31,14 @@ Use the following header to authenticate your request. 
#### Response body ```json - { - "createdDateTime": "2021-10-19T23:24:41.572Z", - "lastModifiedDateTime": "2021-10-19T23:24:41.572Z", - "lastTrainedDateTime": "2021-10-19T23:24:41.572Z", - "lastDeployedDateTime": "2021-10-19T23:24:41.572Z", - "type": "orchestration", - "name": "myProject", - "multiLingual": true, - "description": "string", - "language": "en-us", - "settings": {} - } +{ + "createdDateTime": "2022-04-18T13:53:03Z", + "lastModifiedDateTime": "2022-04-18T13:53:03Z", + "lastTrainedDateTime": "2022-04-18T14:14:28Z", + "lastDeployedDateTime": "2022-04-18T14:49:01Z", + "projectKind": "Orchestration", + "projectName": "{PROJECT-NAME}", + "description": "This is a sample Orchestration project.", + "language": "{LANGUAGE-CODE}" +} ``` diff --git a/articles/cognitive-services/language-service/orchestration-workflow/includes/rest-api/get-training-status.md b/articles/cognitive-services/language-service/orchestration-workflow/includes/rest-api/get-training-status.md index 95d8b721e08b..c4afc8ab68d1 100644 --- a/articles/cognitive-services/language-service/orchestration-workflow/includes/rest-api/get-training-status.md +++ b/articles/cognitive-services/language-service/orchestration-workflow/includes/rest-api/get-training-status.md @@ -9,22 +9,20 @@ ms.date: 05/17/2022 ms.author: aahi --- - - -Use the following **GET** request to query the status of your model's training process. You can use the URL you received from the previous step, or replace the placeholder values below with your own values. +Use the following **GET** request to get the status of your model's training progress. Replace the placeholder values below with your own values. ### Request URL ```rest -{ENDPOINT}/language/analyze-conversations/projects/{PROJECT-NAME}/train/jobs/{JOB-ID}?api-version={API-VERSION} +{ENDPOINT}/language/authoring/analyze-conversations/projects/{PROJECT-NAME}/train/jobs/{JOB-ID}?api-version={API-VERSION} ``` |Placeholder |Value | Example | |---------|---------|---------| -|`{ENDPOINT}` | The endpoint for authenticating your API request. | `https://.cognitiveservices.azure.com` | +|`{YOUR-ENDPOINT}` | The endpoint for authenticating your API request. | `https://.cognitiveservices.azure.com` | |`{PROJECT-NAME}` | The name for your project. This value is case-sensitive. | `EmailApp` | -|`{JOB-ID}` | The ID for locating your model's training status. This is in the `location` header value you received in the previous step. | `xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxxx` | -|`{API-VERSION}` | The version of the API you are calling. The value referenced here is for the latest released [model version](../../../concepts/model-lifecycle.md#choose-the-model-version-used-on-your-data). | `2022-03-01-preview` | +|`{JOB-ID}` | The ID for locating your model's training status. This is in the `location` header value you received when submitted your training job. | `xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxxx` | +|`{API-VERSION}` | The version of the API you are calling. The value referenced here is for the latest version released. See [Model lifecycle](../../../concepts/model-lifecycle.md#choose-the-model-version-used-on-your-data) to learn more about other available API versions. | `2022-05-01` | ### Headers @@ -37,26 +35,28 @@ Use the following header to authenticate your request. ### Response Body -Once you send the request, you will get the following response. Keep polling this endpoint until the `status` parameter changes to `succeeded`. +Once you send the request, you will get the following response. 
Keep polling this endpoint until the **status** parameter changes to "succeeded". ```json { "result": { "modelLabel": "{MODEL-LABEL}", - "trainingConfigVersion": "{TRAINING-CONGIF-VERSION}", - "trainingMode": "{TRAINING-MODE}", + "trainingConfigVersion": "{TRAINING-CONFIG-VERSION}", + "estimatedEndDateTime": "2022-04-18T15:47:58.8190649Z", "trainingStatus": { - "percentComplete": 2, - "startDateTime": "{START-TIME}", - "status": "{STATUS}" + "percentComplete": 3, + "startDateTime": "2022-04-18T15:45:06.8190649Z", + "status": "running" }, - "evaluationStatus": { "percentComplete": 0, "status": "notStarted" }, - "estimatedEndDateTime": "{ESTIMATED-END-TIME}" + "evaluationStatus": { + "percentComplete": 0, + "status": "notStarted" + } }, - "jobId": "{JOB-ID}", - "createdDateTime": "{CREATED-TIME}", - "lastUpdatedDateTime": "{UPDATED-TIME}", - "expirationDateTime": "{EXPIRATION-TIME}", + "jobId": "xxxxxx-xxxxx-xxxxxx-xxxxxx", + "createdDateTime": "2022-04-18T15:44:44Z", + "lastUpdatedDateTime": "2022-04-18T15:45:48Z", + "expirationDateTime": "2022-04-25T15:44:44Z", "status": "running" } ``` @@ -64,15 +64,11 @@ Once you send the request, you will get the following response. Keep polling thi |Key |Value | Example | |---------|----------|--| | `modelLabel` |The model name| `Model1` | -| `trainingConfigVersion` | The training configuration version. By default, the latest [model version](../../../concepts/model-lifecycle.md#choose-the-model-version-used-on-your-data) is used. | `2022-05-01` | -| `trainingMode` | Your training mode.| `standard` | -| `startDateTime` | The time training started. |`2022-04-14T10:23:04.2598544Z`| -| `status` | The status of the training job. | `running`| -| `estimatedEndDateTime` | Estimated time for the training job to finish.| `2022-04-14T10:29:38.2598544Z`| -|`jobId`| Your training job ID.| `xxxxx-xxxx-xxxx-xxxx-xxxxxxxxx`| -|`createdDateTime`| Training job creation date and time. | `2022-04-14T10:22:42Z`| -|`lastUpdatedDateTime`| Training job last updated date and time. | `2022-04-14T10:23:45Z`| -|`expirationDateTime`| Training job expiration date and time. | `2022-04-14T10:22:42Z`| - - - +| `trainingConfigVersion` | The training configuration version. By default, the [latest version](../../../concepts/model-lifecycle.md#choose-the-model-version-used-on-your-data) is used. 
| `2022-05-01` | +| `startDateTime` | The time training started |`2022-04-14T10:23:04.2598544Z`| +| `status` | The status of the training job | `running`| +|`estimatedEndDateTime` | Estimated time for the training job to finish| `2022-04-14T10:29:38.2598544Z`| +|`jobId`| Your training job ID| `xxxxx-xxxx-xxxx-xxxx-xxxxxxxxx`| +|`createdDateTime`| Training job creation date and time | `2022-04-14T10:22:42Z`| +|`lastUpdatedDateTime`| Training job last updated date and time | `2022-04-14T10:23:45Z`| +|`expirationDateTime`| Training job expiration date and time | `2022-04-14T10:22:42Z`| diff --git a/articles/cognitive-services/language-service/orchestration-workflow/includes/rest-api/import-project.md b/articles/cognitive-services/language-service/orchestration-workflow/includes/rest-api/import-project.md index 370f2cae4131..c4a76c30bb96 100644 --- a/articles/cognitive-services/language-service/orchestration-workflow/includes/rest-api/import-project.md +++ b/articles/cognitive-services/language-service/orchestration-workflow/includes/rest-api/import-project.md @@ -9,20 +9,21 @@ ms.date: 05/17/2022 ms.author: aahi --- +Submit a **POST** request using the following URL, headers, and JSON body to import your project. ### Request URL -Create a **POST** request using the following URL, headers, and JSON body to import your project. Use the following URL when creating your API request. Replace the placeholder values below with your own values. +Use the following URL when creating your API request. Replace the placeholder values below with your own values. ```rest -{ENDPOINT}/language/authoring/analyze-text/projects/{PROJECT-NAME}/:import?api-version={API-VERSION} +{ENDPOINT}/language/authoring/analyze-conversations/projects/{PROJECT-NAME}/:import?api-version={API-VERSION} ``` |Placeholder |Value | Example | |---------|---------|---------| |`{ENDPOINT}` | The endpoint for authenticating your API request. | `https://.cognitiveservices.azure.com` | -|`{PROJECT-NAME}` | The name for your project. This value is case-sensitive. | `EmailApp` | -|`{API-VERSION}` | The version of the API you are calling. The value referenced here is for the latest released [model version](../../../concepts/model-lifecycle.md#choose-the-model-version-used-on-your-data). | `2022-03-01-preview` | +|`{PROJECT-NAME}` | The name for your project. This value is case-sensitive. | `myProject` | +|`{API-VERSION}` | The version of the API you're calling. The value referenced here is for the latest released [model version](../../../concepts/model-lifecycle.md#choose-the-model-version-used-on-your-data). | `2022-05-01` | ### Headers @@ -31,7 +32,6 @@ Use the following header to authenticate your request. |Key|Value| |--|--| |`Ocp-Apim-Subscription-Key`| The key to your resource. Used for authenticating your API requests.| -| `format` | `clu` | ### Body @@ -39,19 +39,19 @@ Use the following sample JSON as your body. 
```json { - "api-version": "{API-VERSION}", + "projectFileVersion": "{API-VERSION}", "stringIndexType": "Utf16CodeUnit", "metadata": { - "projectKind": "orchestration", + "projectKind": "Orchestration", "settings": { "confidenceThreshold": 0 }, "projectName": "{PROJECT-NAME}", - "multilingual": true, "description": "Project description", "language": "{LANGUAGE-CODE}" }, "assets": { + "projectKind": "Orchestration", "intents": [ { "category": "string", diff --git a/articles/cognitive-services/language-service/orchestration-workflow/includes/rest-api/model-evaluation.md b/articles/cognitive-services/language-service/orchestration-workflow/includes/rest-api/model-evaluation.md index e16f3572e35a..e92664c07adc 100644 --- a/articles/cognitive-services/language-service/orchestration-workflow/includes/rest-api/model-evaluation.md +++ b/articles/cognitive-services/language-service/orchestration-workflow/includes/rest-api/model-evaluation.md @@ -9,13 +9,13 @@ ms.date: 05/20/2022 ms.author: aahi --- -Create a **GET** request using the following URL, headers, and JSON body to get trained model evaluation summary. +Create a **GET** request using the following URL, headers, and JSON body to get the trained model evaluation summary. ### Request URL ```rest -{ENDPOINT}/language/analyze-conversations/projects/{projectName}/models/{trainedModelLabel}/evaluation/summary-result?api-version={API-VERSION} +{ENDPOINT}/language/authoring/analyze-conversations/projects/{projectName}/models/{trainedModelLabel}/evaluation/summary-result?api-version={API-VERSION} ``` |Placeholder |Value | Example | @@ -23,7 +23,7 @@ Create a **GET** request using the following URL, headers, and JSON body to get |`{ENDPOINT}` | The endpoint for authenticating your API request. | `https://.cognitiveservices.azure.com` | |`{PROJECT-NAME}` | The name for your project. This value is case-sensitive. | `myProject` | |`{trainedModelLabel}` | The name for your trained model. This value is case-sensitive. | `Model1` | -|`{API-VERSION}` | The version of the API you are calling. The value referenced here is for the latest released [model version](../../../concepts/model-lifecycle.md#choose-the-model-version-used-on-your-data). | `2022-03-01-preview` | +|`{API-VERSION}` | The version of the API you are calling. The value referenced here is for the latest released [model version](../../../concepts/model-lifecycle.md#choose-the-model-version-used-on-your-data). | `2022-05-01` | ### Headers @@ -33,7 +33,6 @@ Use the following header to authenticate your request. |Key|Value| |--|--| |`Ocp-Apim-Subscription-Key`| The key to your resource. Used for authenticating your API requests.| -|`Content-Type` | application/json | ### Response Body diff --git a/articles/cognitive-services/language-service/orchestration-workflow/includes/rest-api/project-details.md b/articles/cognitive-services/language-service/orchestration-workflow/includes/rest-api/project-details.md index 1097d579b7c5..c9767ff1cfdd 100644 --- a/articles/cognitive-services/language-service/orchestration-workflow/includes/rest-api/project-details.md +++ b/articles/cognitive-services/language-service/orchestration-workflow/includes/rest-api/project-details.md @@ -10,9 +10,7 @@ ms.date: 05/20/2022 ms.author: aahi --- - To get an orchestration workflow project's details, submit a **GET** request using the following URL and headers. Replace the placeholder values with your own values. 
- ```rest {ENDPOINT}/language/authoring/analyze-conversations/projects/{PROJECT-NAME}?api-version={API-VERSION} ``` @@ -21,7 +19,7 @@ To get an orchestration workflow project's details, submit a **GET** request usi |---------|---------|---------| |`{ENDPOINT}` | The endpoint for authenticating your API request. | `https://.cognitiveservices.azure.com` | |`{PROJECT-NAME}` | The name for your project. This value is case-sensitive. | `myProject` | -|`{API-VERSION}` | The version of the API you are calling. The value referenced here is for the latest released [model version](../../../concepts/model-lifecycle.md#choose-the-model-version-used-on-your-data) | `2022-03-01-preview` | +|`{API-VERSION}` | The version of the API you are calling. The value referenced here is for the latest released [model version](../../../concepts/model-lifecycle.md#choose-the-model-version-used-on-your-data). | `2022-05-01` | ### Headers @@ -37,20 +35,15 @@ Once you send the request, you will get the following response. ```json { - "createdDateTime": "{CREATED-TIME}", - "lastModifiedDateTime": "{CREATED-TIME}", - "lastTrainedDateTime": "{CREATED-TIME}", - "lastDeployedDateTime": "{CREATED-TIME}", - "projectKind": "orchestration", - "settings": { - "confidenceThreshold": 0 - }, + "createdDateTime": "2022-04-18T13:53:03Z", + "lastModifiedDateTime": "2022-04-18T13:53:03Z", + "lastTrainedDateTime": "2022-04-18T14:14:28Z", + "lastDeployedDateTime": "2022-04-18T14:49:01Z", + "projectKind": "Orchestration", "projectName": "{PROJECT-NAME}", - "multilingual": true, - "description": "string", + "description": "This is a sample orchestration project.", "language": "{LANGUAGE-CODE}" } - ``` -Once you send your API request, you will receive a `202` response indicating success and JSON response body with your project details. +Once you send your API request, you will receive a `200` response indicating success and JSON response body with your project details. diff --git a/articles/cognitive-services/language-service/orchestration-workflow/includes/rest-api/query-model.md b/articles/cognitive-services/language-service/orchestration-workflow/includes/rest-api/query-model.md index 4a2f6b458df0..043bfbd7a5c8 100644 --- a/articles/cognitive-services/language-service/orchestration-workflow/includes/rest-api/query-model.md +++ b/articles/cognitive-services/language-service/orchestration-workflow/includes/rest-api/query-model.md @@ -11,19 +11,16 @@ ms.author: aahi Create a **POST** request using the following URL, headers, and JSON body to start testing an orchestration workflow model. - ### Request URL ```rest -{ENDPOINT}/language/:analyze-conversations?projectName={PROJECT-NAME}&deploymentName={DEPLOYMENT-NAME}?api-version={API-VERSION} +{ENDPOINT}/language/:analyze-conversations?api-version={API-VERSION} ``` |Placeholder |Value | Example | |---------|---------|---------| |`{ENDPOINT}` | The endpoint for authenticating your API request. | `https://.cognitiveservices.azure.com` | -|`{PROJECT-NAME}` | The name for your project. This value is case-sensitive. | `myProject` | -|`{DEPLOYMENT-NAME}` | The name for your deployment. This value is case-sensitive. | `staging` | -|`{API-VERSION}` | The version of the API you are calling. The value referenced here is for the latest released [model version](../../../concepts/model-lifecycle.md#choose-the-model-version-used-on-your-data). | `2022-03-01-preview` | +|`{API-VERSION}` | The version of the API you are calling. 
The value referenced here is for the latest released [model version](../../../concepts/model-lifecycle.md#choose-the-model-version-used-on-your-data). | `2022-05-01` | ### Headers @@ -33,13 +30,38 @@ Use the following header to authenticate your request. |Key|Value| |--|--| |`Ocp-Apim-Subscription-Key`| The key to your resource. Used for authenticating your API requests.| -|`Content-Type` | application/json | + ### Request Body ```json { - "query":"attach a docx file" + "kind": "Conversation", + "analysisInput": { + "conversationItem": { + "text": "Text1", + "participantId": "1", + "id": "1" + } + }, + "parameters": { + "projectName": "{PROJECT-NAME}", + "deploymentName": "{DEPLOYMENT-NAME}", + "directTarget": "qnaProject", + "targetProjectParameters": { + "qnaProject": { + "targetProjectKind": "QuestionAnswering", + "callingOptions": { + "context": { + "previousUserQuery": "Meet Surface Pro 4", + "previousQnaId": 4 + }, + "top": 1, + "question": "App Service overview" + } + } + } + } } ``` @@ -49,17 +71,38 @@ Once you send the request, you will get the following response for the predictio ```json { - "query": "attach a docx file", - "prediction": { - "topIntent": "Attach", - "projectKind": "workflow", - "intents": [ - { "category": "Attach", "confidenceScore": 0.9998592 }, - { "category": "Read", "confidenceScore": 0.00010551753 }, - { "category": "Delete", "confidenceScore": 3.5209276e-5 } - ] + "kind": "ConversationResult", + "result": { + "query": "App Service overview", + "prediction": { + "projectKind": "Orchestration", + "topIntent": "qnaTargetApp", + "intents": { + "qnaTargetApp": { + "targetProjectKind": "QuestionAnswering", + "confidenceScore": 1, + "result": { + "answers": [ + { + "questions": [ + "App Service overview" + ], + "answer": "The compute resources you use are determined by the *App Service plan* that you run your apps on.", + "confidenceScore": 0.7384000000000001, + "id": 1, + "source": "https://docs.microsoft.com/en-us/azure/app-service/overview", + "metadata": {}, + "dialog": { + "isContextOnly": false, + "prompts": [] + } + } + ] + } + } + } + } } } - ``` diff --git a/articles/cognitive-services/language-service/orchestration-workflow/includes/rest-api/swap-deployment.md b/articles/cognitive-services/language-service/orchestration-workflow/includes/rest-api/swap-deployment.md index 8f0a301e0472..b132c9a22d56 100644 --- a/articles/cognitive-services/language-service/orchestration-workflow/includes/rest-api/swap-deployment.md +++ b/articles/cognitive-services/language-service/orchestration-workflow/includes/rest-api/swap-deployment.md @@ -10,21 +10,20 @@ ms.author: aahi --- - Create a **POST** request using the following URL, headers, and JSON body to start a swap deployments job. ### Request URL ```rest -{ENDPOINT}/language/analyze-conversations/projects/{PROJECT-NAME}/deployments:swap?api-version={API-VERSION} +{ENDPOINT}/language/authoring/analyze-conversations/projects/{PROJECT-NAME}/deployments:swap?api-version={API-VERSION} ``` |Placeholder |Value | Example | |---------|---------|---------| |`{ENDPOINT}` | The endpoint for authenticating your API request. | `https://.cognitiveservices.azure.com` | |`{PROJECT-NAME}` | The name for your project. This value is case-sensitive. | `myProject` | -|`{API-VERSION}` | The version of the API you are calling. The value referenced here is for the latest released [model version](../../../concepts/model-lifecycle.md#choose-the-model-version-used-on-your-data). 
| `2022-03-01-preview` | +|`{API-VERSION}` | The version of the API you are calling. The value referenced here is for the latest released [model version](../../../concepts/model-lifecycle.md#choose-the-model-version-used-on-your-data). | `2022-05-01` | ### Headers @@ -34,7 +33,6 @@ Use the following header to authenticate your request. |Key|Value| |--|--| |`Ocp-Apim-Subscription-Key`| The key to your resource. Used for authenticating your API requests.| -|`Content-Type` | application/json | ### Request Body @@ -46,10 +44,9 @@ Use the following header to authenticate your request. ``` -|Key| value| Example| -|--|--|--| -|`firstDeploymentName` | The name for your first deployment. This value is case-sensitive. | `production` | -|`secondDeploymentName` | The name for your second deployment. This value is case-sensitive. | `staging` | - +|Key|Placeholder| Value| Example| +|--|--|--|--| +|firstDeploymentName |`{FIRST-DEPLOYMENT-NAME}`| The name for your first deployment. This value is case-sensitive. | `production` | +|secondDeploymentName | `{SECOND-DEPLOYMENT-NAME}`|The name for your second deployment. This value is case-sensitive. | `staging` | Once you send your API request, you will receive a `202` response indicating success. diff --git a/articles/cognitive-services/language-service/orchestration-workflow/includes/rest-api/train-model.md b/articles/cognitive-services/language-service/orchestration-workflow/includes/rest-api/train-model.md index 87375d7b93e1..2918fbffc9f5 100644 --- a/articles/cognitive-services/language-service/orchestration-workflow/includes/rest-api/train-model.md +++ b/articles/cognitive-services/language-service/orchestration-workflow/includes/rest-api/train-model.md @@ -8,19 +8,21 @@ ms.date: 05/17/2022 ms.author: aahi --- +Create a **POST** request using the following URL, headers, and JSON body to submit a training job. + ### Request URL -Create a **POST** request using the following URL, headers, and JSON body to start training. Use the following URL when creating your API request. Replace the placeholder values below with your own values. +Use the following URL when creating your API request. Replace the placeholder values below with your own values. ```rest -{ENDPOINT}/language/analyze-conversations/projects/{PROJECT-NAME}/:train?api-version={API-VERSION} +{ENDPOINT}/language/authoring/analyze-conversations/projects/{PROJECT-NAME}/:train?api-version={API-VERSION} ``` |Placeholder |Value | Example | |---------|---------|---------| |`{ENDPOINT}` | The endpoint for authenticating your API request. | `https://.cognitiveservices.azure.com` | |`{PROJECT-NAME}` | The name for your project. This value is case-sensitive. | `EmailApp` | -|`{API-VERSION}` | The version of the API you are calling. The value referenced here is for the latest released [model version](../../../concepts/model-lifecycle.md#choose-the-model-version-used-on-your-data). | `2022-03-01-preview` | +|`{API-VERSION}` | The version of the API you are calling. The value referenced here is for the latest released [model version](../../../concepts/model-lifecycle.md#choose-the-model-version-used-on-your-data). | `2022-05-01` | ### Headers @@ -29,7 +31,7 @@ Use the following header to authenticate your request. |Key|Value| |--|--| |`Ocp-Apim-Subscription-Key`| The key to your resource. Used for authenticating your API requests.| -|`Content-Type` | application/json | + ### Request body @@ -39,24 +41,27 @@ Use the following object in your request.
The model will be named `MyModel` once { "modelLabel": "{MODEL-NAME}", "trainingConfigVersion": "{CONFIG-VERSION}", - "trainingMode": "{TRAINING-MODE}", "evaluationOptions": { "kind": "percentage", - "trainingSplitPercentage": 80, - "testingSplitPercentage": 20 + "testingSplitPercentage": 20, + "trainingSplitPercentage": 80 } } ``` |Key |Placeholder|Value | Example | |---------|-----|----|---------| -|`modelLabel` | `{MODEL-NAME}`|Your model name. | `Model1` | -| `trainingConfigVersion` |`{CONFIG-VERSION}`| The [training configuration version](../../../concepts/model-lifecycle.md#choose-the-model-version-used-on-your-data). By default, the latest version is used. | `2022-05-01` | -| `trainingMode` |`{TRAINING-MODE}`| Your training mode. | `advanced` | +|`modelLabel` | `{MODEL-NAME}`|Your model name. | `Model1` | +| `trainingConfigVersion` |`{CONFIG-VERSION}`| The training configuration model version. By default, the latest [model version](../../../concepts/model-lifecycle.md) is used. | `2022-05-01` | +| `kind` | `percentage` | Split method. Possible values are `percentage` or `manual`. See [how to train a model](../../how-to/train-model.md#data-splitting) for more information. |`percentage`| +| `trainingSplitPercentage` | `80`| Percentage of your tagged data to be included in the training set. Recommended value is `80`. | `80`| +| `testingSplitPercentage` | `20` | Percentage of your tagged data to be included in the testing set. Recommended value is `20`. | `20` | + + > [!NOTE] + > The `trainingSplitPercentage` and `testingSplitPercentage` are only required if `kind` is set to `percentage`, and the sum of both percentages must equal 100. Once you send your API request, you will receive a `202` response indicating success. In the response headers, extract the `operation-location` value. It will be formatted like this: ```rest -{ENDPOINT}/language/analyze-conversations/projects/{PROJECT-NAME}/train/jobs/{JOB-ID}?api-version={API-VERSION} +{ENDPOINT}/language/authoring/analyze-conversations/projects/{PROJECT-NAME}/train/jobs/{JOB-ID}?api-version={API-VERSION} ``` - -`JOB-ID` is used to identify your request, since this operation is asynchronous. You will use this URL in the next step to get the training status. +You can use this URL to get the training job status. diff --git a/articles/cognitive-services/language-service/orchestration-workflow/tutorials/connect-services.md b/articles/cognitive-services/language-service/orchestration-workflow/tutorials/connect-services.md index dcd2f96fb18f..c914f4a05927 100644 --- a/articles/cognitive-services/language-service/orchestration-workflow/tutorials/connect-services.md +++ b/articles/cognitive-services/language-service/orchestration-workflow/tutorials/connect-services.md @@ -1,5 +1,5 @@ --- -title: Intergate custom question answering and conversational language understanding into orchestration workflows +title: Integrate custom question answering and conversational language understanding with orchestration workflow description: Learn how to connect different projects with orchestration workflow.
keywords: conversational language understanding, bot framework, bot, language understanding, nlu author: aahill @@ -9,16 +9,16 @@ ms.reviewer: cahann, hazemelh ms.service: cognitive-services ms.subservice: language-service ms.topic: tutorial -ms.date: 05/17/2022 +ms.date: 05/25/2022 --- -# Connect different services with orchestration workflow +# Connect different services with Orchestration workflow Orchestration workflow is a feature that allows you to connect different projects from LUIS, conversational language understanding, and custom question answering in one project. You can then use this project for predictions under one endpoint. The orchestration project makes a prediction on which project should be called and automatically routes the request to that project, and returns with its response. In this tutorial, you will learn how to connect a custom question answering knowledge base with a conversational language understanding project. You will then call the project using the .NET SDK sample for orchestration. -This tutorial will include creating a **chit chat** knowledge base and **email commands** project. Chit chat will deal with common niceties and greetings with static responses. +This tutorial will include creating a **chit chat** knowledge base and **email commands** project. Chit chat will deal with common niceties and greetings with static responses. Email commands will predict among a few simple actions for an email assistant. The tutorial will then teach you to call the Orchestrator using the SDK in a .NET environment using a sample solution. ## Prerequisites @@ -27,12 +27,12 @@ This tutorial will include creating a **chit chat** knowledge base and **email c - You will need the key and endpoint from the resource you create to connect your bot to the API. You'll paste your key and endpoint into the code below later in the tutorial. Copy them from the **Keys and Endpoint** tab in your resource. - When you enable custom question answering, you must select an Azure search resource to connect to. - Make sure the region of your resource is supported by [conversational language understanding](../../conversational-language-understanding/service-limits.md#regional-availability). -- Download the **OrchestrationWorkflowSample** sample in [**.NET**](https://aka.ms/orchestration-sample). +- Download the **OrchestrationWorkflowSample** [sample](https://aka.ms/orchestration-sample). ## Create a custom question answering knowledge base 1. Sign into the [Language Studio](https://language.cognitive.azure.com/) and select your Language resource. -2. Find and select the [custom question answering](https://language.cognitive.azure.com/questionAnswering/projects/) card in the homepage. +2. Find and select the [Custom question answering](https://language.cognitive.azure.com/questionAnswering/projects/) card in the homepage. 3. Click on **Create new project** and add the name **chitchat** with the language _English_ before clicking on **Create project**. 4. When the project loads, click on **Add source** and select _Chit chat_. Select the professional personality for chit chat before @@ -40,38 +40,38 @@ This tutorial will include creating a **chit chat** knowledge base and **email c 5. Go to **Deploy knowledge base** from the left navigation menu and click on **Deploy** and confirm the popup that shows up. -You are now done with deploying your knowledge base for chit chat. You can explore the type of questions and answers to expect in the **Edit knowledge base** tab. 
+You are now done with deploying your knowledge base for chit chat. You can explore the type of questions and answers to expect in the **Edit knowledge base** page. ## Create a conversational language understanding project -1. In Language Studio, go to the [conversational language understanding](https://language.cognitive.azure.com/clu/projects) service. +1. In Language Studio, go to the [Conversational language understanding](https://language.cognitive.azure.com/clu/projects) service. 2. Download the **EmailProject.json** sample file [here](https://aka.ms/clu-sample-json). -3. Click on the arrow next to **Create new project** and select **Import**. Browse to the downloaded EmailProject.json file you downloaded and press Done. +3. Click on the **Import** button. Browse to the EmailProject.json file you downloaded and press Done. :::image type="content" source="../media/import-export.png" alt-text="A screenshot showing where to import a JSON file." lightbox="../media/import-export.png"::: -4. Once the project is loaded, click on **Training** on the left. Press on Start a training job, provide the model name **v1** and press Train. All other settings such as **Standard Training** and the evaluation settings can be left as is. +4. Once the project is loaded, click on **Training jobs** on the left. Press on Start a training job, provide the model name **v1** and press Train. :::image type="content" source="../media/train-model.png" alt-text="A screenshot of the training page." lightbox="../media/train-model.png"::: -5. Once training is complete, click to **Deployments** on the left. Click on Add Deployment and create a new deployment with the name **Testing**, and assign model **v1** to the deployment. +5. Once training is complete, click to **Deploying a model** on the left. Click on Add Deployment and create a new deployment with the name **Testing**, and assign model **v1** to the deployment. :::image type="content" source="../media/deploy-model-tutorial.png" alt-text="A screenshot showing the model deployment page." lightbox="../media/deploy-model-tutorial.png"::: -You are now done with deploying a conversational language understanding project for email commands. You can explore the different commands in the **Utterances** page. +You are now done with deploying a conversational language understanding project for email commands. You can explore the different commands in the **Data labeling** page. -## Create an orchestration workflow project +## Create an Orchestration workflow project -1. In Language Studio, go to the [orchestration workflow](https://language.cognitive.azure.com/orchestration/projects) service. +1. In Language Studio, go to the [Orchestration workflow](https://language.cognitive.azure.com/orchestration/projects) service. 2. Click on **Create new project**. Use the name **Orchestrator** and the language _English_ before clicking next then done. -3. Once the project is created, click on **Add** in the **Build schema** page. +3. Once the project is created, click on **Add** in the **Schema definition** page. 4. Select _Yes, I want to connect it to an existing project_. Add the intent name **EmailIntent** and select **Conversational Language Understanding** as the connected service. Select the recently created **EmailProject** project for the project name before clicking on **Add Intent**. :::image type="content" source="../media/connect-intent-tutorial.png" alt-text="A screenshot of the connect intent popup in orchestration workflow."
lightbox="../media/connect-intent-tutorial.png"::: 5. Add another intent but now select **Question Answering** as the service and select **chitchat** as the project name. -6. Similar to conversational language understanding, go to **Training** and start a new training job with the name **v1** and press Train. -7. Once training is complete, click to **Deployments** on the left. Click on Add deployment and create a new deployment with the name **Testing**, and assign model **v1** to the deployment and press Next. +6. Similar to conversational language understanding, go to **Training jobs** and start a new training job with the name **v1** and press Train. +7. Once training is complete, click to **Deploying a model** on the left. Click on Add deployment and create a new deployment with the name **Testing**, and assign model **v1** to the deployment and press Next. 8. On the next page, select the deployment name **Testing** for the **EmailIntent**. This tells the orchestrator to call the **Testing** deployment in **EmailProject** when it routes to it. Custom question answering projects only have one deployment by default. :::image type="content" source="../media/deployment-orchestrator-tutorial.png" alt-text="A screenshot of the deployment popup for orchestration workflow." lightbox="../media/deployment-orchestrator-tutorial.png"::: @@ -80,27 +80,29 @@ Now your orchestration project is ready to be used. Any incoming request will be ## Call the orchestration project with the Conversations SDK -1. In the downloaded **OrchestrationWorkflowSample** solution, make sure to install all the required packages. In Visual Studio, go to _Tools_, _NuGet Package Manager_ and select _Package Manager Console_ and run the following command. +1. In the downloaded sample, open OrchestrationWorkflowSample.sln in Visual Studio. + +2. In the OrchestrationWorkflowSample solution, make sure to install all the required packages. In Visual Studio, go to _Tools_, _NuGet Package Manager_ and select _Package Manager Console_ and run the following command. ```powershell dotnet add package Azure.AI.Language.Conversations ``` -2. In `Program.cs`, replace `{api-key}` and the placeholder endpoint. Use the key and endpoint for the Language resource you created earlier. You can find them in the **Keys and Endpoint** tab in your Language resource in Azure. +3. In `Program.cs`, replace `{api-key}` and the `{endpoint}` variables. Use the key and endpoint for the Language resource you created earlier. You can find them in the **Keys and Endpoint** tab in your Language resource in Azure. ```csharp -Uri endpoint = new Uri("https://myaccount.api.cognitive.microsoft.com"); +Uri endpoint = new Uri("{endpoint}"); AzureKeyCredential credential = new AzureKeyCredential("{api-key}"); ``` -3. Replace the orchestrationProject parameters to **Orchestrator** and **Testing** as below if they are not set already. +4. Replace the orchestrationProject parameters to **Orchestrator** and **Testing** as below if they are not set already. ```csharp ConversationsProject orchestrationProject = new ConversationsProject("Orchestrator", "Testing"); ``` -4. Run the project or press F5 in Visual Studio. -5. Input a query such as "read the email from matt" or "hello how are you". You'll now observe different responses for each, a conversational language understanding **EmailProject** response from the first, and the answer from the **chitchat** for the second query. +5. Run the project or press F5 in Visual Studio. +6. 
Input a query such as "read the email from matt" or "hello how are you". You'll now observe different responses for each, a conversational language understanding **EmailProject** response from the first query, and the answer from the **chitchat** knowledge base for the second query. **Conversational Language Understanding**: :::image type="content" source="../media/clu-response-orchestration.png" alt-text="A screenshot showing the sample response from conversational language understanding." lightbox="../media/clu-response-orchestration.png"::: diff --git a/articles/cognitive-services/language-service/question-answering/includes/rest.md b/articles/cognitive-services/language-service/question-answering/includes/rest.md index f1fbeada5b15..9515b4db124a 100644 --- a/articles/cognitive-services/language-service/question-answering/includes/rest.md +++ b/articles/cognitive-services/language-service/question-answering/includes/rest.md @@ -14,7 +14,7 @@ ms.custom: ignite-fall-2021 * Azure subscription - [Create one for free](https://azure.microsoft.com/free/cognitive-services) * Question answering, requires a [Language resource](https://portal.azure.com/?quickstart=true#create/Microsoft.CognitiveServicesTextAnalytics) with the custom question answering feature enabled to generate an API key and endpoint. * After your Language resource deploys, select **Go to resource**. You will need the key and endpoint from the resource you create to connect to the API. Paste your key and endpoint into the code below later in the quickstart. -* To create a Language resource with [Azure CLI](/azure/cognitive-services/cognitive-services-apis-create-account-cli) provide the following additional properties during resource creation configure Custom Question Answering with your Language resource `--api-properties qnaAzureSearchEndpointId=/subscriptions//resourceGroups//providers/Microsoft.Search/searchServices/ qnaAzureSearchEndpointKey=` +* To create a Language resource with [Azure CLI](../../../cognitive-services-apis-create-account-cli.md) provide the following additional properties during resource creation configure Custom Question Answering with your Language resource `--api-properties qnaAzureSearchEndpointId=/subscriptions//resourceGroups//providers/Microsoft.Search/searchServices/ qnaAzureSearchEndpointKey=` * An existing knowledge base to query. If you have not setup a knowledge base, you can follow the instructions in the [**Language Studio quickstart**](../quickstart/sdk.md). Or add a knowledge base that uses this [Surface User Guide URL](https://download.microsoft.com/download/7/B/1/7B10C82E-F520-4080-8516-5CF0D803EEE0/surface-book-user-guide-EN.pdf) as a data source. ## Query a knowledge base @@ -151,4 +151,4 @@ This example will return a result of: } ] } -``` +``` \ No newline at end of file diff --git a/articles/cognitive-services/language-service/question-answering/includes/sdk-csharp.md b/articles/cognitive-services/language-service/question-answering/includes/sdk-csharp.md index a0f8babda794..f650d5c332ec 100644 --- a/articles/cognitive-services/language-service/question-answering/includes/sdk-csharp.md +++ b/articles/cognitive-services/language-service/question-answering/includes/sdk-csharp.md @@ -26,7 +26,7 @@ Use this quickstart for the question answering client library for .NET to: * The [Visual Studio IDE](https://visualstudio.microsoft.com/vs/) or current version of [.NET Core](https://dotnet.microsoft.com/download/dotnet-core). 
* Question answering, requires a [Language resource](https://portal.azure.com/?quickstart=true#create/Microsoft.CognitiveServicesTextAnalytics) with the custom question answering feature enabled to generate an API key and endpoint. * After your Language resource deploys, select **Go to resource**. You will need the key and endpoint from the resource you create to connect to the API. Paste your key and endpoint into the code below later in the quickstart. -* To create a Language resource with [Azure CLI](/azure/cognitive-services/cognitive-services-apis-create-account-cli) provide the following additional properties during resource creation configure Custom Question Answering with your Language resource `--api-properties qnaAzureSearchEndpointId=/subscriptions//resourceGroups//providers/Microsoft.Search/searchServices/ qnaAzureSearchEndpointKey=` +* To create a Language resource with [Azure CLI](../../../cognitive-services-apis-create-account-cli.md) provide the following additional properties during resource creation configure Custom Question Answering with your Language resource `--api-properties qnaAzureSearchEndpointId=/subscriptions//resourceGroups//providers/Microsoft.Search/searchServices/ qnaAzureSearchEndpointKey=` * An existing knowledge base to query. If you have not setup a knowledge base, you can follow the instructions in the [**Language Studio quickstart**](../quickstart/sdk.md). Or add a knowledge base that uses this [Surface User Guide URL](https://download.microsoft.com/download/7/B/1/7B10C82E-F520-4080-8516-5CF0D803EEE0/surface-book-user-guide-EN.pdf) as a data source. ## Setting up @@ -225,4 +225,4 @@ namespace questionansweringcsharp To run the code above, replace the `Program.cs` with the contents of the script block above and modify the `endpoint` and `credential` variables to correspond to the language resource you created as part of the prerequisites. -In this case, we iterate through all responses and only return the response with the highest confidence score that is greater than 0.9. To understand more about the options available with `GetAnswersFromText`. +In this case, we iterate through all responses and only return the response with the highest confidence score that is greater than 0.9. To understand more about the options available with `GetAnswersFromText`. \ No newline at end of file diff --git a/articles/cognitive-services/language-service/question-answering/includes/sdk-python.md b/articles/cognitive-services/language-service/question-answering/includes/sdk-python.md index b75f50bc008f..094a18898910 100644 --- a/articles/cognitive-services/language-service/question-answering/includes/sdk-python.md +++ b/articles/cognitive-services/language-service/question-answering/includes/sdk-python.md @@ -27,7 +27,7 @@ Use this quickstart for the question answering client library for Python to: * [Python 3.x](https://www.python.org/) * Question answering, requires a [Language resource](https://portal.azure.com/?quickstart=true#create/Microsoft.CognitiveServicesTextAnalytics) with the custom question answering feature enabled to generate an API key and endpoint. * After your Language resource deploys, select **Go to resource**. You will need the key and endpoint from the resource you create to connect to the API. Paste your key and endpoint into the code below later in the quickstart. 
-* To create a Language resource with [Azure CLI](/azure/cognitive-services/cognitive-services-apis-create-account-cli) provide the following additional properties during resource creation configure Custom Question Answering with your Language resource `--api-properties qnaAzureSearchEndpointId=/subscriptions//resourceGroups//providers/Microsoft.Search/searchServices/ qnaAzureSearchEndpointKey=` +* To create a Language resource with [Azure CLI](../../../cognitive-services-apis-create-account-cli.md) provide the following additional properties during resource creation configure Custom Question Answering with your Language resource `--api-properties qnaAzureSearchEndpointId=/subscriptions//resourceGroups//providers/Microsoft.Search/searchServices/ qnaAzureSearchEndpointKey=` * An existing knowledge base to query. If you have not setup a knowledge base, you can follow the instructions in the [**Language Studio quickstart**](../quickstart/sdk.md). Or add a knowledge base that uses this [Surface User Guide URL](https://download.microsoft.com/download/7/B/1/7B10C82E-F520-4080-8516-5CF0D803EEE0/surface-book-user-guide-EN.pdf) as a data source. ## Setting up @@ -175,4 +175,4 @@ A: Power and charging. It takes two to four hours to charge the Surface Pro 4 ba Confidence Score: 0.9254655838012695 ``` -In this case, we iterate through all responses and only return the response with the highest confidence score that is greater than 0.9. To understand more about the options available with [get_answers_from_text](https://azuresdkdocs.blob.core.windows.net/$web/python/azure-ai-language-questionanswering/1.0.0/azure.ai.language.questionanswering.html#azure.ai.language.questionanswering.QuestionAnsweringClient.get-answers-from-text), review the [AnswersFromTextOptions parameters](https://azuresdkdocs.blob.core.windows.net/$web/python/azure-ai-language-questionanswering/1.0.0/azure.ai.language.questionanswering.models.html#azure.ai.language.questionanswering.models.AnswersFromTextOptions). +In this case, we iterate through all responses and only return the response with the highest confidence score that is greater than 0.9. To understand more about the options available with [get_answers_from_text](https://azuresdkdocs.blob.core.windows.net/$web/python/azure-ai-language-questionanswering/1.0.0/azure.ai.language.questionanswering.html#azure.ai.language.questionanswering.QuestionAnsweringClient.get-answers-from-text), review the [AnswersFromTextOptions parameters](https://azuresdkdocs.blob.core.windows.net/$web/python/azure-ai-language-questionanswering/1.0.0/azure.ai.language.questionanswering.models.html#azure.ai.language.questionanswering.models.AnswersFromTextOptions). 
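To make that selection step concrete, here is a minimal, self-contained sketch. It assumes the 1.0.0 package layout referenced above (`QuestionAnsweringClient`, `AnswersFromTextOptions`, and answers that expose `answer` and `confidence` attributes); the endpoint, key, and sample text are hypothetical placeholders rather than values taken from this quickstart.

```python
from azure.core.credentials import AzureKeyCredential
from azure.ai.language.questionanswering import QuestionAnsweringClient
from azure.ai.language.questionanswering.models import AnswersFromTextOptions

# Hypothetical placeholder values - use your own Language resource details.
endpoint = "https://<your-resource-name>.cognitiveservices.azure.com"
client = QuestionAnsweringClient(endpoint, AzureKeyCredential("<your-resource-key>"))

# Ask a question against ad hoc text instead of a deployed knowledge base.
options = AnswersFromTextOptions(
    question="How long does it take to charge a Surface Pro 4?",
    text_documents=[
        "Power and charging. It takes two to four hours to charge the "
        "Surface Pro 4 battery fully from an empty state."
    ],
)
output = client.get_answers_from_text(options)

# Keep only answers above the 0.9 confidence threshold, then report the best one.
confident_answers = [a for a in output.answers if a.confidence > 0.9]
if confident_answers:
    best = max(confident_answers, key=lambda a: a.confidence)
    print(f"A: {best.answer}")
    print(f"Confidence Score: {best.confidence}")
else:
    print("No answer exceeded the confidence threshold.")
```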
\ No newline at end of file diff --git a/articles/cognitive-services/language-service/question-answering/media/migrate-qnamaker/migration-kb-name-validation-success.png b/articles/cognitive-services/language-service/question-answering/media/migrate-qnamaker/migration-kb-name-validation-success.png deleted file mode 100644 index 8c672c5ed607..000000000000 Binary files a/articles/cognitive-services/language-service/question-answering/media/migrate-qnamaker/migration-kb-name-validation-success.png and /dev/null differ diff --git a/articles/cognitive-services/language-service/question-answering/media/migrate-qnamaker/migration-kb-name-validation.png b/articles/cognitive-services/language-service/question-answering/media/migrate-qnamaker/migration-kb-name-validation.png deleted file mode 100644 index 76a97043172b..000000000000 Binary files a/articles/cognitive-services/language-service/question-answering/media/migrate-qnamaker/migration-kb-name-validation.png and /dev/null differ diff --git a/articles/cognitive-services/language-service/question-answering/tutorials/adding-synonyms.md b/articles/cognitive-services/language-service/question-answering/tutorials/adding-synonyms.md index a608c7c0e88d..d7188c61b84a 100644 --- a/articles/cognitive-services/language-service/question-answering/tutorials/adding-synonyms.md +++ b/articles/cognitive-services/language-service/question-answering/tutorials/adding-synonyms.md @@ -76,12 +76,36 @@ As you can see, when `troubleshoot` was not added as a synonym, we got a low con > [!IMPORTANT] > Synonyms are case insensitive. Synonyms also might not work as expected if you add stop words as synonyms. The list of stop words can be found here: [List of stop words](https://github.com/Azure-Samples/azure-search-sample-data/blob/master/STOPWORDS.md). > For instance, if you add the abbreviation **IT** for Information technology, the system might not be able to recognize Information Technology because **IT** is a stop word and is filtered when a query is processed. -> Synonyms do not allow these special characters: ',', '?', ':', ';', '\"', '\'', '(', ')', '{', '}', '[', ']', '-', '+', '.', '/', '!', '*', '-', '_', '@', '#' ## Notes * Synonyms can be added in any order. The ordering is not considered in any computational logic. -* Special characters are not allowed for synonyms. For hyphenated words like "COVID-19", they are treated the same as "COVID 19", and "space" can be used as a term separator. * In case of overlapping synonym words between 2 sets of alterations, it may have unexpected results and it is not recommended to use overlapping sets. +* Special characters are not allowed for synonyms. For hyphenated words like "COVID-19", they are treated the same as "COVID 19", and "space" can be used as a term separator. 
Following is the list of special characters **not allowed**: + +|Special character | Symbol| +|--------------|--------------------------------| +|Comma | ,| +|Question mark | ?| +|Colon| :| +|Semicolon| ;| +|Double quotation mark| \"| +|Single quotation mark| \'| +|Open parenthesis|(| +|Close parenthesis|)| +|Open brace|{| +|Close brace|}| +|Open bracket|[| +|Close bracket|]| +|Hyphen/dash|-| +|Plus sign|+| +|Period|.| +|Forward slash|/| +|Exclamation mark|!| +|Asterisk|\*| +|Underscore|\_| +|At sign|@| +|Hash|#| + ## Next steps diff --git a/articles/cognitive-services/language-service/summarization/how-to/document-summarization.md b/articles/cognitive-services/language-service/summarization/how-to/document-summarization.md index b27f8cce1e40..b2be9edf00fd 100644 --- a/articles/cognitive-services/language-service/summarization/how-to/document-summarization.md +++ b/articles/cognitive-services/language-service/summarization/how-to/document-summarization.md @@ -8,7 +8,7 @@ manager: nitinme ms.service: cognitive-services ms.subservice: language-service ms.topic: how-to -ms.date: 03/16/2022 +ms.date: 05/26/2022 ms.author: aahi ms.custom: language-service-summarization, ignite-fall-2021 --- @@ -93,4 +93,4 @@ Using the above example, the API might return the following summarized sentences ## See also -* [Document summarization overview](../overview.md) +* [Summarization overview](../overview.md) diff --git a/articles/cognitive-services/language-service/summarization/language-support.md b/articles/cognitive-services/language-service/summarization/language-support.md index 0c0d6aa4093d..11e6da26be9f 100644 --- a/articles/cognitive-services/language-service/summarization/language-support.md +++ b/articles/cognitive-services/language-service/summarization/language-support.md @@ -8,7 +8,7 @@ manager: nitinme ms.service: cognitive-services ms.subservice: language-service ms.topic: conceptual -ms.date: 05/11/2022 +ms.date: 05/26/2022 ms.author: aahi ms.custom: language-service-summarization, ignite-fall-2021 --- @@ -50,4 +50,4 @@ Conversation summarization supports the following languages: ## Next steps -[Document summarization overview](overview.md) +* [Summarization overview](overview.md) diff --git a/articles/cognitive-services/language-service/summarization/overview.md b/articles/cognitive-services/language-service/summarization/overview.md index 112b88c9de69..4c36d32a7b9c 100644 --- a/articles/cognitive-services/language-service/summarization/overview.md +++ b/articles/cognitive-services/language-service/summarization/overview.md @@ -8,7 +8,7 @@ manager: nitinme ms.service: cognitive-services ms.subservice: language-service ms.topic: overview -ms.date: 05/06/2022 +ms.date: 05/26/2022 ms.author: aahi ms.custom: language-service-summarization, ignite-fall-2021, event-tier1-build-2022 --- @@ -91,11 +91,11 @@ Conversation summarization feature would simplify the text into the following: --- -## Get started with text summarization +## Get started with summarization # [Document summarization](#tab/document-summarization) -To use this feature, you submit raw unstructured text for analysis and handle the API output in your application. Analysis is performed as-is, with no additional customization to the model used on your data. There are two ways to use text summarization: +To use this feature, you submit raw unstructured text for analysis and handle the API output in your application. Analysis is performed as-is, with no additional customization to the model used on your data.
There are two ways to use summarization: |Development option |Description | Links | @@ -119,8 +119,8 @@ To use this feature, you submit raw text for analysis and handle the API output # [Document summarization](#tab/document-summarization) -* Text summarization takes raw unstructured text for analysis. See [Data and service limits](../concepts/data-limits.md) in the how-to guide for more information. -* Text summarization works with a variety of written languages. See [language support](language-support.md) for more information. +* Summarization takes raw unstructured text for analysis. See [Data and service limits](../concepts/data-limits.md) in the how-to guide for more information. +* Summarization works with a variety of written languages. See [language support](language-support.md) for more information. # [Conversation summarization](#tab/conversation-summarization) @@ -144,6 +144,10 @@ As you use document summarization in your applications, see the following refere ## Responsible AI -An AI system includes not only the technology, but also the people who will use it, the people who will be affected by it, and the environment in which it’s deployed. Read the [transparency note for document summarization](/legal/cognitive-services/language-service/transparency-note-extractive-summarization?context=/azure/cognitive-services/language-service/context/context) to learn about responsible AI use and deployment in your systems. You can also see the following articles for more information: +An AI system includes not only the technology, but also the people who will use it, the people who will be affected by it, and the environment in which it’s deployed. Read the [transparency note for summarization](/legal/cognitive-services/language-service/transparency-note-extractive-summarization?context=/azure/cognitive-services/language-service/context/context) to learn about responsible AI use and deployment in your systems. 
You can also see the following articles for more information: + +* [Transparency note for Azure Cognitive Service for Language](/legal/cognitive-services/language-service/transparency-note?context=/azure/cognitive-services/language-service/context/context) +* [Integration and responsible use](/legal/cognitive-services/language-service/guidance-integration-responsible-use-summarization?context=/azure/cognitive-services/language-service/context/context) +* [Characteristics and limitations of summarization](/legal/cognitive-services/language-service/characteristics-and-limitations-summarization?context=/azure/cognitive-services/language-service/context/context) +* [Data, privacy, and security](/legal/cognitive-services/language-service/data-privacy?context=/azure/cognitive-services/language-service/context/context) -[!INCLUDE [Responsible AI links](../includes/overview-responsible-ai-links.md)] diff --git a/articles/cognitive-services/language-service/toc.yml b/articles/cognitive-services/language-service/toc.yml index b657d781067e..ac199f7cddb8 100644 --- a/articles/cognitive-services/language-service/toc.yml +++ b/articles/cognitive-services/language-service/toc.yml @@ -112,6 +112,16 @@ items: href: https://aka.ms/ct-authoring-apis - name: Runtime prediction API href: https://aka.ms/ct-runtime-api + - name: SDK samples + items: + - name: Java + href: https://aka.ms/sdk-samples-java + - name: JavaScript + href: https://aka.ms/sdk-samples-java-script + - name: C# + href: https://aka.ms/sdk-samples-dot-net + - name: Python + href: https://aka.ms/sdk-samples-python - name: Custom named entity recognition (NER) items: - name: Overview @@ -188,6 +198,16 @@ items: href: https://aka.ms/ct-authoring-apis - name: Runtime prediction API href: https://aka.ms/ct-runtime-api + - name: SDK samples + items: + - name: Java + href: https://aka.ms/sdk-samples-java + - name: JavaScript + href: https://aka.ms/sdk-samples-java-script + - name: C# + href: https://aka.ms/sdk-samples-dot-net + - name: Python + href: https://aka.ms/sdk-samples-python - name: Conversational language understanding items: - name: Overview @@ -208,7 +228,7 @@ items: href: conversational-language-understanding/how-to/tag-utterances.md - name: Train a model href: conversational-language-understanding/how-to/train-model.md - - name: View model evaluation + - name: View model performance href: conversational-language-understanding/how-to/view-model-evaluation.md - name: Deploy a model href: conversational-language-understanding/how-to/deploy-model.md @@ -218,7 +238,7 @@ items: href: conversational-language-understanding/how-to/fail-over.md - name: Concepts items: - - name: Backwards compatibility + - name: Backwards compatibility with LUIS href: conversational-language-understanding/concepts/backwards-compatibility.md - name: Entity components href: conversational-language-understanding/concepts/entity-components.md @@ -228,8 +248,6 @@ items: href: conversational-language-understanding/concepts/data-formats.md - name: None intent href: conversational-language-understanding/concepts/none-intent.md - - name: Using multiple languages - href: conversational-language-understanding/concepts/multiple-languages.md - name: Enterprise readiness items: - name: Virtual networks @@ -256,6 +274,11 @@ items: href: https://aka.ms/clu-authoring-apis - name: Runtime prediction API href: https://aka.ms/clu-runtime-api + items: + - name: C# + href: https://aka.ms/sdk-sample-conversation-dot-net + - name: Python + href: 
https://aka.ms/sdk-samples-conversation-python - name: Entity linking items: - name: Overview @@ -507,23 +530,23 @@ items: items: - name: Overview href: orchestration-workflow/overview.md - - name: FAQ - href: orchestration-workflow/faq.md - name: Quickstart href: orchestration-workflow/quickstart.md + - name: FAQ + href: orchestration-workflow/faq.md - name: Language support href: orchestration-workflow/language-support.md - name: How-to guides items: - - name: Build schema - href: orchestration-workflow/how-to/build-schema.md - name: Create project href: orchestration-workflow/how-to/create-project.md + - name: Build schema + href: orchestration-workflow/how-to/build-schema.md - name: Label utterances href: orchestration-workflow/how-to/tag-utterances.md - name: Train a model href: orchestration-workflow/how-to/train-model.md - - name: View model evaluation + - name: View model performance href: orchestration-workflow/how-to/view-model-evaluation.md - name: Deploy a model href: orchestration-workflow/how-to/deploy-model.md @@ -541,6 +564,14 @@ items: href: orchestration-workflow/concepts/none-intent.md - name: Data formats href: orchestration-workflow/concepts/data-formats.md + - name: Enterprise readiness + items: + - name: Virtual networks + href: ../cognitive-services-virtual-networks.md?context=/azure/cognitive-services/language-service/context/context + - name: Cognitive Services security + href: ../cognitive-services-security.md?context=/azure/cognitive-services/language-service/context/context + - name: Encryption of data at rest + href: concepts/encryption-data-at-rest.md - name: Tutorials items: - name: Connect conversational language understanding and custom question answering @@ -557,6 +588,11 @@ items: href: https://aka.ms/clu-authoring-apis - name: Runtime prediction API href: https://aka.ms/clu-runtime-api + items: + - name: C# + href: https://aka.ms/sdk-sample-conversation-dot-net + - name: Python + href: https://aka.ms/sdk-samples-conversation-python - name: Personally Identifying Information (PII) detection items: - name: Overview @@ -580,14 +616,10 @@ items: items: - name: Call PII href: personally-identifiable-information/how-to-call.md - - name: Call PII for Conversation (preview) - href: personally-identifiable-information/how-to-call-for-conversations.md - name: Concepts items: - name: Recognized entity categories - href: personally-identifiable-information/concepts/entity-categories.md - - name: Recognized entity categories for conversation - href: personally-identifiable-information/concepts/conversations-entity-categories.md + href: personally-identifiable-information/concepts/entity-categories.md - name: Reference items: - name: REST API diff --git a/articles/cognitive-services/language-support.md b/articles/cognitive-services/language-support.md index cdd44db3c7b6..55d1a7f107a9 100644 --- a/articles/cognitive-services/language-support.md +++ b/articles/cognitive-services/language-support.md @@ -27,7 +27,7 @@ These Cognitive Services are language agnostic and don't have limitations based * [Computer Vision](./computer-vision/language-support.md) * [Ink Recognizer (Preview)](/previous-versions/azure/cognitive-services/Ink-Recognizer/language-support) -* [Video Indexer](/azure/azure-video-indexer/language-identification-model.md#guidelines-and-limitations) +* [Video Indexer](../azure-video-indexer/language-identification-model.md#guidelines-and-limitations) ## Language diff --git 
a/articles/cognitive-services/personalizer/responsible-characteristics-and-limitations.md b/articles/cognitive-services/personalizer/responsible-characteristics-and-limitations.md new file mode 100644 index 000000000000..3150195e0564 --- /dev/null +++ b/articles/cognitive-services/personalizer/responsible-characteristics-and-limitations.md @@ -0,0 +1,88 @@ +--- +title: Characteristics and limitations of Personalizer +titleSuffix: Azure Cognitive Services +description: Characteristics and limitations of Personalizer +author: jcodella +ms.author: jacodel +manager: nitinme +ms.service: cognitive-services +ms.subservice: personalizer +ms.date: 05/23/2022 +ms.topic: article +--- + + +# Characteristics and limitations of Personalizer + +Azure Personalizer can work in many scenarios. To understand where you can apply Personalizer, make sure the requirements of your scenario meet the [expectations for Personalizer to work](where-can-you-use-personalizer.md#expectations-required-to-use-personalizer). To understand whether Personalizer should be used and how to integrate it into your applications, see [Use Cases for Personalizer](responsible-use-cases.md). You'll find criteria and guidance on choosing use cases, designing features, and reward functions for your uses of Personalizer. + +Before you read this article, it's helpful to understand some background information about [how Personalizer works](how-personalizer-works.md). + + +## Select features for Personalizer + +Personalizing content depends on having useful information about the content and the user. For some applications and industries, some user features can be directly or indirectly considered discriminatory and potentially illegal. See the [Personalizer integration and responsible use guidelines](responsible-guidance-integration.md) on assessing features to use with Personalizer. + + +## Computing rewards for Personalizer + +Personalizer learns to improve action choices based on the reward score provided by your application business logic. +A well-built reward score will act as a short-term proxy to a business goal that's tied to an organization's mission. +For example, rewarding on clicks will make Personalizer seek clicks at the expense of everything else, even if what's clicked is distracting to the user or not tied to a business outcome. +In contrast, a news site might want to set rewards tied to something more meaningful than clicks, such as "Did the user spend enough time to read the content?" or "Did the user click relevant articles or references?" With Personalizer, it's easy to tie metrics closely to rewards. However, you will need to be careful not to confound short-term user engagement with desired outcomes. + + +## Unintended consequences from reward scores + +Even if built with the best intentions reward scores might create unexpected consequences or unintended results because of how Personalizer ranks content. + +Consider the following examples: + +- Rewarding video content personalization on the percentage of the video length watched will probably tend to rank shorter videos higher than longer videos. +- Rewarding social media shares, without sentiment analysis of how it's shared or the content itself, might lead to ranking offensive, unmoderated, or inflammatory content. This type of content tends to incite a lot of engagement but is often damaging. +- Rewarding the action on user interface elements that users don't expect to change might interfere with the usability and predictability of the user interface. 
For example, buttons that change location or purpose without warning might make it harder for certain groups of users to stay productive. + +Implement these best practices: + +- Run offline experiments with your system by using different reward approaches to understand impact and side effects. +- Evaluate your reward functions, and ask yourself how a naïve person might alter its interpretation which may result in unintentional or undesirable outcomes. +- Archive information and assets, such as models, learning policies, and other data, that Personalizer uses to function, so that results can be reproducible. + + +## General guidelines to understand and improve performance + +Because Personalizer is based on Reinforcement Learning and learns from rewards to make better choices over time, performance isn't measured in traditional supervised learning terms used in classifiers, such as precision and recall. The performance of Personalizer is directly measured as the sum of reward scores it receives from your application via the Reward API. + +When you use Personalizer, the product user interface in the Azure portal provides performance information so you can monitor and act on it. The performance can be seen in the following ways: + +- If Personalizer is in Online Learning mode, you can perform [offline evaluations](concepts-offline-evaluation.md). +- If Personalizer is in [Apprentice mode](concept-apprentice-mode.md), you can see the performance metrics (events imitated and rewards imitated) in the Evaluation pane in the Azure portal. + +We recommend you perform frequent offline evaluations to maintain oversight. This task will help you monitor trends and ensure effectiveness. For example, you could decide to temporarily put Personalizer in Apprentice Mode if reward performance has a dip. + +### Personalizer performance estimates shown in Offline Evaluations: Limitations + +We define the "performance" of Personalizer as the total rewards it obtains during use. Personalizer performance estimates shown in Offline Evaluations are computed instead of measured. It is important to understand the limitations of these estimates: + +- The estimates are based on past data, so future performance may vary as the world and your users change. +- The estimates for baseline performance are computed probabilistically. For this reason, the confidence band for the baseline average reward is important. The estimate will get more precise with more events. If you use a smaller number of actions in each Rank call the performance estimate may increase in confidence as there is a higher probability that Personalizer may choose any one of them (including the baseline action) for every event. +- Personalizer constantly trains a model in near real time to improve the actions chosen for each event, and as a result, it will affect the total rewards obtained. The model performance will vary over time, depending on the recent past training data. +- Exploration and action choice are stochastic processes guided by the Personalizer model. The random numbers used for these stochastic processes are seeded from the Event Id. To ensure reproducibility of explore-exploit and other stochastic processes, use the same Event Id. +- Online performance may be capped by [exploration](concepts-exploration.md). Lowering exploration settings will limit how much information is harvested to stay on top of changing trends and usage patterns, so the balance depends on each use case. 
Some use cases merit starting off with higher exploration settings and reducing them over time (e.g., start with 30% and reduce to 10%). + + +### Check existing models that might accidentally bias Personalizer + +Existing recommendations, customer segmentation, and propensity model outputs can be used by your application as inputs to Personalizer. Personalizer learns to disregard features that don't contribute to rewards. Review and evaluate any propensity models to determine if they're good at predicting rewards and contain strong biases that might generate harm as a side effect. For example, look for recommendations that might be based on harmful stereotypes. Consider using tools such as [FairLearn](https://fairlearn.org/) to facilitate the process. + + +## Proactive assessments during your project lifecycle + +Consider creating methods for team members, users, and business owners to report concerns regarding responsible use and a process that prioritizes their resolution. Consider treating tasks for responsible use just like other crosscutting tasks in the application lifecycle, such as tasks related to user experience, security, or DevOps. Tasks related to responsible use and their requirements shouldn’t be afterthoughts. Responsible use should be discussed and implemented throughout the application lifecycle. + + +## Next steps + +- [Responsible use and integration](responsible-guidance-integration.md) +- [Offline evaluations](concepts-offline-evaluation.md) +- [Features for context and actions](concepts-features.md) diff --git a/articles/cognitive-services/personalizer/responsible-data-and-privacy.md b/articles/cognitive-services/personalizer/responsible-data-and-privacy.md new file mode 100644 index 000000000000..e2edcfeaed08 --- /dev/null +++ b/articles/cognitive-services/personalizer/responsible-data-and-privacy.md @@ -0,0 +1,123 @@ +--- +title: Data and privacy for Personalizer +titleSuffix: Azure Cognitive Services +description: Data and privacy for Personalizer +author: jcodella +ms.author: jacodel +manager: nitinme +ms.service: cognitive-services +ms.subservice: personalizer +ms.date: 05/23/2022 +ms.topic: article +--- + +# Data and privacy for Personalizer + +This article provides information about what data Azure Personalizer uses to work, how it processes that data, and how you can control that data. It assumes basic familiarity with [what Personalizer is](what-is-personalizer.md) and [how Personalizer works](how-personalizer-works.md). Specific terms can be found in Terminology. + + +## What data does Personalizer process? + +Personalizer processes the following types of data: +- **Context features and Action features**: Your application sends information about users, and the products or content to personalize, in aggregated form. This data is sent to Personalizer in each Rank API call in arguments for Context and Actions. You decide what to send to the API and how to aggregate it. The data is expressed as attributes or features. You provide information about your users, such as their device and their environment, as Context features. You shouldn't send features specific to a user like a phone number or email or User IDs. Action features include information about your content and product, such as movie genre or product price. For more information, see [Features for Actions and Context](concepts-features.md). 
+- **Reward information**: A reward score (a number between 0 and 1) ranks how well the user interaction resulting from the personalization choice mapped to a business goal. For example, an event might get a reward of "1" if a recommended article was clicked on. For more information, see [Rewards](concept-rewards.md). + +To understand more about what information you typically use with Personalizer, see [Features are information about Actions and Context](concepts-features.md). + +[!TIP] You decide which features to use, how to aggregate them, and where the information comes from when you call the Personalizer Rank API in your application. You also determine how to create reward scores. To make informed decisions about what information to use with Personalizer, see the [Personalizer responsible use guidelines](responsible-use-cases.md). + + +## How does Personalizer process data? + +The following diagram illustrates how your data is processed. + +![Diagram that shows how Personalizer processes data.](media/how-personalizer-works/personalization-how-it-works.png) + +Personalizer processes data as follows: + +1. Personalizer receives data each time the application calls the Rank API for a personalization event. The data is sent via the arguments for the Context and Actions. + +2. Personalizer uses the information in the Context and Actions, its internal AI models, and service configuration to return the rank response for the ID of the action to use. The contents of the Context and Actions are stored for no more than 48 hours in transient caches with the EventID used or generated in the Rank API. +3. The application then calls the Reward API with one or more reward scores. This information is also stored in transient caches and matched with the Actions and Context information. +4. After the rank and reward information for events is correlated, it's removed from transient caches and placed in more permanent storage. It remains in permanent storage until the number of days specified in the Data Retention setting has gone by, at which time the information is deleted. If you choose not to specify a number of days in the Data Retention setting, this data will be saved as long as the Personalizer Azure Resource is not deleted or until you choose to Clear Data via the UI or APIs. You can change the Data Retention setting at any time. +5. Personalizer continuously trains internal Personalizer AI models specific to this Personalizer loop by using the data in the permanent storage and machine learning configuration parameters in [Learning settings](concept-active-learning.md). +6. Personalizer creates [offline evaluations either](concepts-offline-evaluation.md) automatically or on demand. +Offline evaluations contain a report of rewards obtained by Personalizer models during a past time period. An offline evaluation embeds the models active at the time of their creation, and the learning settings used to create them, as well as a historical aggregate of average reward per event for that time window. Evaluations also include [feature importance](concept-feature-evaluation.md), which is a list of features observed in the time period, and their relative importance in the model. + + +### Independence of Personalizer loops + +Each Personalizer loop is separate and independent from others, as follows: + +- **No external data augmentation**: Each Personalizer loop only uses the data supplied to it by you via Rank and Reward API calls to train models. 
Personalizer doesn't use any additional information from any origin, such as other Personalizer loops in your own Azure subscription, Microsoft, third-party sources or subprocessors. +- **No data, model, or information sharing**: A Personalizer loop won't share information about events, features, and models with any other Personalizer loop in your subscription, Microsoft, third parties or subprocessors. + + +## How is data retained and what customer controls are available? + +Personalizer retains different types of data in different ways and provides the following controls for each. + + +### Personalizer rank and reward data + +Personalizer stores the features about Actions and Context sent via rank and reward calls for the number of days specified in configuration under Data Retention. +To control this data retention, you can: + +1. Specify the number of days to retain log storage in the [Azure portal for the Personalizer resource](how-to-settings.md)under **Configuration** > **Data Retention** or via the API. The default **Data Retention** setting is seven days. Personalizer deletes all Rank and Reward data older than this number of days automatically. + +2. Clear data for logged personalization and reward data in the Azure portal under **Model and learning settings** > **Clear data** > **Logged personalization and reward data** or via the API. + +3. Delete the Personalizer loop from your subscription in the Azure portal or via Azure resource management APIs. + +You can't access past data from Rank and Reward API calls in the Personalizer resource directly. If you want to see all the data that's being saved, configure log mirroring to create a copy of this data on an Azure Blob Storage resource you've created and are responsible for managing. + + +### Personalizer transient cache + +Personalizer stores partial data about an event separate from rank and reward calls in transient caches. Events are automatically purged from the transient cache 48 hours from the time the event occurred. + +To delete transient data, you can: + +1. Clear data for logged personalization and reward data in the Azure portal under **Model and learning settings** > **Clear data** or via the API. + +2. Delete the Personalizer loop from your subscription in the Azure portal or via Azure resource management APIs. + + +### Personalizer models and learning settings + +A Personalizer loop trains models with data from Rank and Reward API calls, driven by the hyperparameters and configuration specified in **Model and learning settings** in the Azure portal. Models are volatile. They're constantly changing and being trained on additional data in near real time. Personalizer doesn't automatically save older models and keeps overwriting them with the latest models. For more information, see ([How to manage models and learning settings](how-to-manage-model.md)). To clear models and learning settings: + +1. Reset them in the Azure portal under **Model and learning settings** > **Clear data** or via the API. + +2. Delete the Personalizer loop from your subscription in the Azure portal or via Azure resource management APIs. + + +### Personalizer evaluation reports + +Personalizer also retains the information generated in [offline evaluations](concepts-offline-evaluation.md) for reports. + +To delete offline evaluation reports, you can: + +1. Go to the Personalizer loop under the Azure portal. Go to **Evaluations** and delete the relevant evaluation. + +2. Delete evaluations via the Evaluations API. + +3. 
Delete the Personalizer loop from your subscription in the Azure portal or via Azure resource management APIs. + + +### Further storage considerations + +- **Customer managed keys**: Customers can configure the service to encrypt data at rest with their own managed keys. This second layer of encryption is on top of Microsoft's own encryption. +- **Geography**: In all cases, the incoming data, models, and evaluations are processed and stored in the same geography where the Personalizer resource was created. + +Also see: + +- [How to manage model and learning settings](how-to-manage-model.md) +- [Configure Personalizer learning loop](how-to-settings.md) + + +## Next steps + +- [See Responsible use guidelines for Personalizer](responsible-use-cases.md). + +To learn more about Microsoft's privacy and security commitments, see the[Microsoft Trust Center](https://www.microsoft.com/trust-center). diff --git a/articles/cognitive-services/personalizer/responsible-guidance-integration.md b/articles/cognitive-services/personalizer/responsible-guidance-integration.md new file mode 100644 index 000000000000..16c7e3cb1ac6 --- /dev/null +++ b/articles/cognitive-services/personalizer/responsible-guidance-integration.md @@ -0,0 +1,69 @@ +--- +title: Guidance for integration and responsible use of Personalizer +titleSuffix: Azure Cognitive Services +description: Guidance for integration and responsible use of Personalizer +author: jcodella +ms.author: jacodel +manager: nitinme +ms.service: cognitive-services +ms.subservice: personalizer +ms.date: 05/23/2022 +ms.topic: article +--- + + +# Guidance for integration and responsible use of Personalizer + +Microsoft works to help customers responsibly develop and deploy solutions by using Azure Personalizer. Our principled approach upholds personal agency and dignity by considering the AI system's: + +- Fairness, reliability, and safety. +- Privacy and security. +- Inclusiveness. +- Transparency. +- Human accountability. + +These considerations reflect our commitment to developing responsible AI. + + +## General guidelines for integration and responsible use principles + +When you get ready to integrate and responsibly use AI-powered products or features, the following activities will help to set you up for success: + +- Understand what it can do. Fully assess the potential of Personalizer to understand its capabilities and limitations. Understand how it will perform in your particular scenario and context by thoroughly testing it with real-life conditions and data. + +- **Respect an individual's right to privacy**. Only collect data and information from individuals for lawful and justifiable purposes. Only use data and information that you have consent to use for this purpose. + +- **Obtain legal review**. Obtain appropriate legal advice to review Personalizer and how you are using it in your solution, particularly if you will use it in sensitive or high-risk applications. Understand what restrictions you might need to work within and your responsibility to resolve any issues that might come up in the future. + +- **Have a human in the loop**. Include human oversight as a consistent pattern area to explore. Ensure constant human oversight of the AI-powered product or feature. Maintain the role of humans in decision making. Make sure you can have real-time human intervention in the solution to prevent harm and manage situations when the AI system doesn’t perform as expected. + +- **Build trust with affected stakeholders**. 
Communicate the expected benefits and potential risks to affected stakeholders. Help people understand why the data is needed and how the use of the data will lead to their benefit. Describe data handling in an understandable way. + +- **Create a customer feedback loop**. Provide a feedback channel that allows users and individuals to report issues with the service after it's deployed. After you've deployed an AI-powered product or feature, it requires ongoing monitoring and improvement. Be ready to implement any feedback and suggestions for improvement. Establish channels to collect questions and concerns from affected stakeholders. People who might be directly or indirectly affected by the system include employees, visitors, and the general public. + +- **Feedback**: Seek feedback from a diverse sampling of the community during the development and evaluation process (for example, historically marginalized groups, people with disabilities, and service workers). For more information, see Community jury. + +- **User Study**: Any consent or disclosure recommendations should be framed in a user study. Evaluate the first and continuous-use experience with a representative sample of the community to validate that the design choices lead to effective disclosure. Conduct user research with 10-20 community members (affected stakeholders) to evaluate their comprehension of the information and to determine if their expectations are met. + +- **Transparency**: Consider providing users with information about how the content was personalized. For example, you can give your users a button labeled Why These Suggestions? that shows which top features of the user and actions played a role in producing the Personalizer results. + +- **Adversarial use**: consider establishing a process to detect and act on malicious manipulation. There are actors that will take advantage of machine learning and AI systems' ability to learn from their environment. With coordinated attacks, they can artificially fake patterns of behavior that shift the data and AI models toward their goals. If your use of Personalizer could influence important choices, make sure you have the appropriate means to detect and mitigate these types of attacks in place. + + +## Your responsibility + +All guidelines for responsible implementation build on the foundation that developers and businesses using Personalizer are responsible and accountable for the effects of using these algorithms in society. If you're developing an application that your organization will deploy, you should recognize your role and responsibility for its operation and how it affects people. If you're designing an application to be deployed by a third party, come to a shared understanding of who is ultimately responsible for the behavior of the application. Make sure to document that understanding. + + +## Questions and feedback + +Microsoft is continuously upgrading tools and documents to help you act on these responsibilities. Our team invites you to [provide feedback to Microsoft](mailto:cogsvcs-RL-feedback@microsoft.com?subject%3DPersonalizer%20Responsible%20Use%20Feedback&body%3D%5BPlease%20share%20any%20question%2C%20idea%20or%20concern%5D) if you believe other tools, product features, and documents would help you implement these guidelines for using Personalizer. + + +## Recommended reading +- See Microsoft's six principles for the responsible development of AI published in the January 2018 book, [The Future Computed](https://news.microsoft.com/futurecomputed/). 
+
+
+## Next steps
+
+Understand how the Personalizer API receives features: [Features: Action and Context](concepts-features.md)
diff --git a/articles/cognitive-services/personalizer/responsible-use-cases.md b/articles/cognitive-services/personalizer/responsible-use-cases.md
new file mode 100644
index 000000000000..1c03e2616216
--- /dev/null
+++ b/articles/cognitive-services/personalizer/responsible-use-cases.md
@@ -0,0 +1,75 @@
+---
+title: Transparency note for Personalizer
+titleSuffix: Azure Cognitive Services
+description: Transparency Note for Personalizer
+author: jcodella
+ms.author: jacodel
+manager: nitinme
+ms.service: cognitive-services
+ms.subservice: personalizer
+ms.date: 05/23/2022
+ms.topic: article
+---
+
+# Use cases for Personalizer
+
+## What is a Transparency Note?
+
+An AI system includes not only the technology, but also the people who will use it, the people who will be affected by it, and the environment in which it is deployed. Creating a system that is fit for its intended purpose requires an understanding of how the technology works, its capabilities and limitations, and how to achieve the best performance.
+
+Microsoft provides *Transparency Notes* to help you understand how our AI technology works. This includes the choices system owners can make that influence system performance and behavior, and the importance of thinking about the whole system, including the technology, the people, and the environment. You can use Transparency Notes when developing or deploying your own system, or share them with the people who will use or be affected by your system.
+
+Transparency Notes are part of a broader effort at Microsoft to put our AI principles into practice. To find out more, see [Microsoft AI Principles](https://www.microsoft.com/ai/responsible-ai).
+
+## Introduction to Personalizer
+
+Azure Personalizer is a cloud-based service that helps your applications choose the best content item to show your users. You can use Personalizer to determine what product to suggest to shoppers or to figure out the optimal position for an advertisement. After the content is shown to the user, your application monitors the user's reaction and reports a reward score back to Personalizer. The reward score is used to continuously improve the machine learning model using reinforcement learning. This enhances the ability of Personalizer to select the best content item in subsequent interactions based on the contextual information it receives for each.
+
+For more information, see:
+
+- [What is Personalizer?](what-is-personalizer.md)
+- [Where can you use Personalizer](where-can-you-use-personalizer.md)
+- [How Personalizer works](how-personalizer-works.md)
+
+## Key terms
+
+|Term| Definition|
+|:-----|:----|
+|**Learning Loop** | You create a Personalizer resource, called a learning loop, for every part of your application that can benefit from personalization. If you have more than one experience to personalize, create a loop for each. |
+|**Online model** | The default [learning behavior](terminology.md#learning-behavior) for Personalizer where your learning loop uses machine learning to build the model that predicts the **top action** for your content. |
+|**Apprentice mode** | A [learning behavior](terminology.md#learning-behavior) that helps warm-start a Personalizer model to train without impacting the application's outcomes and actions. |
+|**Rewards**| A measure of how the user responded to the Rank API's returned reward action ID, as a score between 0 and 1.
The 0 to 1 value is set by your business logic, based on how the choice helped achieve your business goals of personalization. The learning loop doesn't store this reward as individual user history. | +|**Exploration**| The Personalizer service is exploring when, instead of returning the best action, it chooses a different action for the user. The Personalizer service avoids drift, stagnation, and can adapt to ongoing user behavior by exploring. | + +For more information, and additional key terms, please refer to the [Personalizer Terminology](/terminology.md) and [conceptual documentation](how-personalizer-works.md). + +## Example use cases + +Some common customer motivations for using Personalizer are to: + +- **User engagement**: Capture user interest by choosing content to increase clickthrough, or to prioritize the next best action to improve average revenue. Other mechanisms to increase user engagement might include selecting videos or music in a dynamic channel or playlist. +- **Content optimization**: Images can be optimized for a product (such as selecting a movie poster from a set of options) to optimize clickthrough, or the UI layout, colors, images, and blurbs can be optimized on a web page to increase conversion and purchase. +- **Maximize conversions using discounts and coupons**: To get the best balance of margin and conversion choose which discounts the application will provide to users, or decide which product to highlight from the results of a recommendation engine to maximize conversion. +- **Maximize positive behavior change**: Select which wellness tip question to send in a notification, messaging, or SMS push to maximize positive behavior change. +- **Increase productivity** in customer service and technical support by highlighting the most relevant next best actions or the appropriate content when users are looking for documents, manuals, or database items. + +## Considerations when choosing a use case + +- Using a service that learns to personalize content and user interfaces is useful. However, it can also be misapplied if the personalization creates harmful side effects in the real world. Consider how personalization also helps your users achieve their goals. +- Consider what the negative consequences in the real world might be if Personalizer isn't suggesting particular items because the system is trained with a bias to the behavior patterns of the majority of the system users. +- Consider situations where the exploration behavior of Personalizer might cause harm. +- Carefully consider personalizing choices that are consequential or irreversible, and that should not be determined by short-term signals and rewards. +- Don't provide actions to Personalizer that shouldn't be chosen. For example, inappropriate movies should be filtered out of the actions to personalize if making a recommendation for an anonymous or underage user. + +Here are some scenarios where the above guidance will play a role in whether, and how, to apply Personalizer: + +- Avoid using Personalizer for ranking offers on specific loan, financial, and insurance products, where personalization features are regulated, based on data the individuals don't know about, can't obtain, or can't dispute; and choices needing years and information “beyond the click” to truly assess how good recommendations were for the business and the users. 
+- Carefully consider personalizing highlights of school courses and education institutions where recommendations without enough exploration might propagate biases and reduce users' awareness of other options. +- Avoid using Personalizer to synthesize content algorithmically with the goal of influencing opinions in democracy and civic participation, as it is consequential in the long term, and can be manipulative if the user's goal for the visit is to be informed, not influenced. + + +## Next steps + +* [Characteristics and limitations for Personalizer](responsible-characteristics-and-limitations.md) +* [Where can you use Personalizer?](where-can-you-use-personalizer.md) diff --git a/articles/cognitive-services/personalizer/toc.yml b/articles/cognitive-services/personalizer/toc.yml index 2219d392d8c4..81b4f6342536 100644 --- a/articles/cognitive-services/personalizer/toc.yml +++ b/articles/cognitive-services/personalizer/toc.yml @@ -20,8 +20,16 @@ href: https://github.com/Azure-Samples/cognitive-services-personalizer-samples - name: Responsible use of AI items: - - name: Ethics & responsible use - href: ethics-responsible-use.md + - name: Transparency notes + items: + - name: Use cases + href: responsible-use-cases.md + - name: Characteristics and limitations + href: responsible-characteristics-and-limitations.md + - name: Data and privacy + href: responsible-data-and-privacy.md + - name: Guidance for integration and responsible use + href: responsible-guidance-integration.md - name: How-to guides items: - name: Create Personalizer Resource diff --git a/articles/communication-services/concepts/analytics/enable-logging.md b/articles/communication-services/concepts/analytics/enable-logging.md index 5cddf8884a4c..c58dac910e96 100644 --- a/articles/communication-services/concepts/analytics/enable-logging.md +++ b/articles/communication-services/concepts/analytics/enable-logging.md @@ -54,7 +54,7 @@ You'll also be prompted to select a destination to store the logs. Platform logs | Destination | Description | |:------------|:------------| -| [Log Analytics workspace](../../../azure-monitor/logs/design-logs-deployment.md) | Sending logs and metrics to a Log Analytics workspace allows you to analyze them with other monitoring data collected by Azure Monitor using powerful log queries and also to use other Azure Monitor features such as alerts and visualizations. | +| [Log Analytics workspace](../../../azure-monitor/logs/log-analytics-workspace-overview.md) | Sending logs and metrics to a Log Analytics workspace allows you to analyze them with other monitoring data collected by Azure Monitor using powerful log queries and also to use other Azure Monitor features such as alerts and visualizations. | | [Event Hubs](../../../event-hubs/index.yml) | Sending logs and metrics to Event Hubs allows you to stream data to external systems such as third-party SIEMs and other log analytics solutions. | | [Azure storage account](../../../storage/blobs/index.yml) | Archiving logs and metrics to an Azure storage account is useful for audit, static analysis, or backup. Compared to Azure Monitor Logs and a Log Analytics workspace, Azure storage is less expensive and logs can be kept there indefinitely. 
| diff --git a/articles/communication-services/concepts/analytics/log-analytics.md b/articles/communication-services/concepts/analytics/log-analytics.md index 4197cc9eddcc..6a9b508a1180 100644 --- a/articles/communication-services/concepts/analytics/log-analytics.md +++ b/articles/communication-services/concepts/analytics/log-analytics.md @@ -16,7 +16,7 @@ ms.subservice: data ## Overview and access -Before you can take advantage of [Log Analytics](../../../azure-monitor/logs/log-analytics-overview.md) for your Communications Services logs, you must first follow the steps outlined in [Enable logging in Diagnostic Settings](enable-logging.md). Once you have enabled your logs and a [Log Analytics Workspace](../../../azure-monitor/logs/design-logs-deployment.md), you will have access to many helpful [default query packs](../../../azure-monitor/logs/query-packs.md#default-query-pack) that will help you quickly visualize and understand the data available in your logs, which are described below. Through Log Analytics, you also get access to more Communications Services Insights via Azure Monitor Workbooks (see: [Communications Services Insights](insights.md)), the ability to create our own queries and Workbooks, [REST API access](https://dev.loganalytics.io/) to any query. +Before you can take advantage of [Log Analytics](../../../azure-monitor/logs/log-analytics-overview.md) for your Communications Services logs, you must first follow the steps outlined in [Enable logging in Diagnostic Settings](enable-logging.md). Once you have enabled your logs and a [Log Analytics Workspace](../../../azure-monitor/logs/workspace-design.md), you will have access to many helpful [default query packs](../../../azure-monitor/logs/query-packs.md#default-query-pack) that will help you quickly visualize and understand the data available in your logs, which are described below. Through Log Analytics, you also get access to more Communications Services Insights via Azure Monitor Workbooks (see: [Communications Services Insights](insights.md)), the ability to create our own queries and Workbooks, [REST API access](https://dev.loganalytics.io/) to any query. ### Access You can access the queries by starting on your Communications Services resource page, and then clicking on "Logs" in the left navigation within the Monitor section: diff --git a/articles/communication-services/concepts/developer-tools/real-time-inspection.md b/articles/communication-services/concepts/developer-tools/real-time-inspection.md index e39fd4a94351..d5d842cd0dd2 100644 --- a/articles/communication-services/concepts/developer-tools/real-time-inspection.md +++ b/articles/communication-services/concepts/developer-tools/real-time-inspection.md @@ -1,6 +1,6 @@ --- -title: Developer Tools - Real-Time Inspection for Azure Communication Services -description: Conceptual documentation outlining the capabilities provided by the Real-Time Inspection tool. +title: Developer Tools - Azure Communication Services Communication Monitoring +description: Conceptual documentation outlining the capabilities provided by the Communication Monitoring tool. 
author: ddematheu2 manager: chpalm services: azure-communication-services @@ -11,18 +11,18 @@ ms.topic: conceptual ms.service: azure-communication-services --- -# Real-time Inspection Tool for Azure Communication Services +# Azure Communication Services communication monitoring [!INCLUDE [Private Preview Disclaimer](../../includes/private-preview-include-section.md)] -The Real-time Inspection Tool enables Azure Communication Services developers to inspect the state of the `Call` to debug or monitor their solution. For developers building an Azure Communication Services solution, they might need visibility for debugging into general call information such as the `Call ID` or advanced states, such as did a user facing diagnostic fire. The Real-time Inspection Tool provides developers this information and more. It can be easily added to any JavaScript (Web) solution by downloading the npm package `azure/communication-tools`. +The Azure Communication Services communication monitoring tool enables developers to inspect the state of the `Call` to debug or monitor their solution. For developers building an Azure Communication Services solution, they might need visibility for debugging into general call information such as the `Call ID` or advanced states, such as did a user facing diagnostic fire. The communication monitoring tool provides developers this information and more. It can be easily added to any JavaScript (Web) solution by downloading the npm package `@azure/communication-monitoring`. >[!NOTE] ->Find the open-source repository for the tool [here](https://github.com/Azure/communication-inspection). +>Find the open-source repository for the tool [here](https://github.com/Azure/communication-monitoring). ## Capabilities -The Real-time Inspection Tool provides developers three categories of information that can be used for debugging purposes: +The Communication Monitoring tool provides developers three categories of information that can be used for debugging purposes: | Category | Descriptions | |--------------------------------|-----------------------------------| @@ -32,65 +32,79 @@ The Real-time Inspection Tool provides developers three categories of informatio Data collected by the tool is only kept locally and temporarily. It can be downloaded from within the interface. -Real-time Inspection Tool is compatible with the same browsers as the Calling SDK [here](../voice-video-calling/calling-sdk-features.md?msclkid=f9cf66e6a6de11ec977ae3f6d266ba8d#javascript-calling-sdk-support-by-os-and-browser). +Communication Monitoring is compatible with the same browsers as the Calling SDK [here](../voice-video-calling/calling-sdk-features.md?msclkid=f9cf66e6a6de11ec977ae3f6d266ba8d#javascript-calling-sdk-support-by-os-and-browser). -## Get started with Real-time Inspection Tool +## Get started with Communication Monitoring -The tool can be accessed through an npm package `azure/communication-inspection`. The package contains the `InspectionTool` object that can be attached to a `Call`. The Call Inspector requires an `HTMLDivElement` as part of its constructor on which it will be rendered. The `HTMLDivElement` will dictate the size of the Call Inspector. +The tool can be accessed through an npm package `@azure/communication-monitoring`. The package contains the `CommunicationMonitoring` object that can be attached to a `Call`. The Call Inspector requires an `HTMLDivElement` as part of its constructor on which it will be rendered. The `HTMLDivElement` will dictate the size of the Call Inspector. 
-### Installing Real-time Inspection Tool
+### Installing Communication Monitoring
 
```bash
-npm i @azure/communication-inspection
+npm i @azure/communication-monitoring
```
 
-### Initialize Real-time Inspection Tool
+### Initialize Communication Monitoring
 
```javascript
-import { CallClient, CallAgent } from "@azure/communication-calling";
-import { InspectionTool } from "@azure/communication-tools";
+import { CallAgent, CallClient } from '@azure/communication-calling'
+import { CommunicationMonitoring } from '@azure/communication-monitoring'
 
-const callClient = new callClient();
-const callAgent = await callClient.createCallAgent({INSERT TOKEN CREDENTIAL});
-const call = callAgent.startCall({INSERT CALL INFORMATION});
+interface Options {
+  callClient: CallClient
+  callAgent: CallAgent
+  divElement: HTMLDivElement
+}
 
-const inspectionTool = new InspectionTool(call, {HTMLDivElement});
+const selectedDiv = document.getElementById('selectedDiv')
+
+const options = {
+  callClient: this.callClient,
+  callAgent: this.callAgent,
+  divElement: selectedDiv,
+}
+
+const communicationMonitoring = new CommunicationMonitoring(options)
```
 
## Usage
 
-`start`: enable the `InspectionTool` to start reading data from the call object and storing it locally for visualization.
+`start`: enable the `CommunicationMonitoring` instance to start reading data from the call object and storing it locally for visualization.
 
```javascript
-inspectionTool.start()
+communicationMonitoring.start()
```
 
-`stop`: disable the `InspectionTool` from reading data from the call object.
+`stop`: disable the `CommunicationMonitoring` instance from reading data from the call object.
 
```javascript
-inspectionTool.stop()
+communicationMonitoring.stop()
```
 
-`open`: Open the `InspectionTool` in the UI.
+`open`: Open the `CommunicationMonitoring` instance in the UI.
 
```javascript
-inspectionTool.open()
+communicationMonitoring.open()
```
 
-`close`: Dismiss the `InspectionTool` in the UI.
+`close`: Dismiss the `CommunicationMonitoring` instance in the UI.
 
```javascript
-inspectionTool.close()
+communicationMonitoring.close()
```
 
+## Download logs
+
+The tool includes the ability to download the logs captured using the `Download logs` button on the top right. The tool will generate a compressed log file that can be provided to our customer support team for debugging.
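Editor's note: the following is a minimal end-to-end sketch, not part of the published article, that ties the snippets above together. It assumes the `@azure/communication-monitoring` package exposes the constructor and the `start`/`open`/`stop`/`close` calls exactly as documented above, that a `CallClient` and `CallAgent` were already created elsewhere in your app, and that the page contains a `div` with the id `selectedDiv`; names such as `attachCommunicationMonitoring` are illustrative only.

```javascript
import { CommunicationMonitoring } from '@azure/communication-monitoring'

// Hypothetical helper: wires the monitoring panel into an app whose callClient
// and callAgent were created elsewhere (for example, during sign-in).
export function attachCommunicationMonitoring (callClient, callAgent) {
  // The div determines where the panel is rendered and how large it is.
  const divElement = document.getElementById('selectedDiv')

  const communicationMonitoring = new CommunicationMonitoring({
    callClient,
    callAgent,
    divElement,
  })

  communicationMonitoring.start() // begin collecting call state locally
  communicationMonitoring.open()  // show the panel in the UI

  // Return a cleanup function for when the inspection session is finished.
  return () => {
    communicationMonitoring.close()
    communicationMonitoring.stop()
  }
}
```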
+ ## Next Steps - [Explore User-Facing Diagnostic APIs](../voice-video-calling/user-facing-diagnostics.md) diff --git a/articles/communication-services/concepts/email/email-authentication-best-practice.md b/articles/communication-services/concepts/email/email-authentication-best-practice.md index 0416512c2a79..d1ef54eba722 100644 --- a/articles/communication-services/concepts/email/email-authentication-best-practice.md +++ b/articles/communication-services/concepts/email/email-authentication-best-practice.md @@ -58,9 +58,9 @@ A DMARC policy record allows a domain to announce that their email uses authenti ## Next steps -* [Best practices for implementing DMARC](https://docs.microsoft.com/microsoft-365/security/office-365-security/use-dmarc-to-validate-email?view=o365-worldwide#best-practices-for-implementing-dmarc-in-microsoft-365&preserve-view=true) +* [Best practices for implementing DMARC](/microsoft-365/security/office-365-security/use-dmarc-to-validate-email?preserve-view=true&view=o365-worldwide#best-practices-for-implementing-dmarc-in-microsoft-365) -* [Troubleshoot your DMARC implementation](https://docs.microsoft.com/microsoft-365/security/office-365-security/use-dmarc-to-validate-email?view=o365-worldwide#troubleshooting-your-dmarc-implementation&preserve-view=true) +* [Troubleshoot your DMARC implementation](/microsoft-365/security/office-365-security/use-dmarc-to-validate-email?preserve-view=true&view=o365-worldwide#troubleshooting-your-dmarc-implementation) * [Email domains and sender authentication for Azure Communication Services](./email-domain-and-sender-authentication.md) @@ -72,4 +72,4 @@ The following documents may be interesting to you: - Familiarize yourself with the [Email client library](../email/sdk-features.md) - How to send emails with custom verified domains?[Add custom domains](../../quickstarts/email/add-custom-verified-domains.md) -- How to send emails with Azure Managed Domains?[Add Azure Managed domains](../../quickstarts/email/add-azure-managed-domains.md) +- How to send emails with Azure Managed Domains?[Add Azure Managed domains](../../quickstarts/email/add-azure-managed-domains.md) \ No newline at end of file diff --git a/articles/communication-services/concepts/includes/sms-tollfree-pricing.md b/articles/communication-services/concepts/includes/sms-tollfree-pricing.md index 1173063a4b43..9c40146d5c3d 100644 --- a/articles/communication-services/concepts/includes/sms-tollfree-pricing.md +++ b/articles/communication-services/concepts/includes/sms-tollfree-pricing.md @@ -13,7 +13,7 @@ ms.custom: include file ms.author: prakulka --- >[!Important] ->Toll-free availability is currently restricted to Azure subscriptions that have a billing address in the United States. +>In most cases, customers with Azure subscriptions locations that match the country of the Number offer will be able to buy the Number. However, US and Canada numbers may be purchased by customers with Azure subscription locations in other countries. Please see [here](../numbers/sub-eligibility-number-capability.md) for details on in-country and cross-country purchases. The Toll-free SMS service requires provisioning a toll-free number through the Azure portal. Once a toll-free number is provisioned, pay-as-you-go pricing applies to the leasing fee, and the usage fee. The leasing fee, and the usage fee is determined by the short code type, location of the short code, and the destination. 
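Editor's note: as a quick illustration of how the toll-free fees described above combine (an editorial sketch, not part of the pricing include), the monthly cost of sending from a single toll-free number is roughly the leasing fee plus, for each message segment, the usage fee and the carrier surcharge. The rates below are the US toll-free figures from the tables that follow; substitute the rates for your own country.

```javascript
// Illustrative only: not an official rate card. Example inputs use the US
// toll-free rates listed on this page.
const leasingFeePerMonth = 2.0                 // USD per toll-free number per month
const usageFeePerSentSegment = 0.0075          // USD per outbound message segment
const carrierSurchargePerSentSegment = 0.0025  // USD per outbound message segment

function estimateMonthlySendCost (sentSegments) {
  return leasingFeePerMonth + sentSegments * (usageFeePerSentSegment + carrierSurchargePerSentSegment)
}

console.log(estimateMonthlySendCost(1000)) // approximately 12 USD for 1,000 sent segments
```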
@@ -22,21 +22,29 @@ The Toll-free SMS service requires provisioning a toll-free number through the A ### Leasing Fee Fees for toll-free leasing are charged after provisioning and then recur on a month-to-month basis: -|Number type |Monthly fee | -|--------------|-----------| -|Toll-free (United States) |$2/mo| +|Country |Number type |Monthly fee| +|--------|-----------|------------| +|United States|Toll-free |$2/mo| +|Canada| Toll-free |$2/mo| ### Usage Fee -SMS offers pay-as-you-go pricing. The price is a per-message segment charge based on the destination of the message. Messages can be sent by toll-free phone numbers to phone numbers located within the United States. +SMS offers pay-as-you-go pricing. The price is a per-message segment* charge based on the destination of the message. Messages can be sent by toll-free phone numbers to phone numbers located within the United States, Canada, and Puerto Rico. The following prices include required communications taxes and fees: -|Message Type |Usage Fee | -|-----------|------------| -|Send messages (per message segment*) |$0.0075 | -|Receive messages (per message segment*) |$0.0075 | +|Country| Send Message | Receive Message| +|-----------|---------|--------------| +|United States| $0.0075 | $0.0075| +|Canada | $0.0075 | $0.0075| *Please see our guide on [SMS character limits](../sms/sms-faq.md#what-is-the-sms-character-limit) to learn more about message segments. ## Carrier surcharge -A standard carrier surcharge of $0.0025/sent message segment and $0.0010/received message segment is also applicable. A carrier surcharge is subject to change. See our guide on [Carrier surcharges](https://github.com/Azure/Communication/blob/master/sms-carrier-surcharge.md) for details. +A standard carrier surcharge is applicable to messages exchanged via toll-free numbers. A carrier surcharge is a per-message segment* charge and is subject to change. Carrier surcharge is calculated based on the destination of the message for sent messages and based on the sender of the message for received messages. See our guide on [Carrier surcharges](https://github.com/Azure/Communication/blob/master/sms-carrier-surcharge.md) for details. See our pricing example [here](../pricing.md#pricing-example-11-sms-sending) to see how SMS prices are calculated. + +|Country| Send Message | Receive Message| +|-----------|---------|--------------| +|United States| $0.0025 | $0.0010| +|Canada | $0.0085 | NA| + +*Please see our guide on [SMS character limits](../sms/sms-faq.md#what-is-the-sms-character-limit) to learn more about message segments. diff --git a/articles/communication-services/concepts/numbers/number-types.md b/articles/communication-services/concepts/numbers/number-types.md index 0916c676fd55..5262c66de443 100644 --- a/articles/communication-services/concepts/numbers/number-types.md +++ b/articles/communication-services/concepts/numbers/number-types.md @@ -18,6 +18,8 @@ Azure Communication Services allows you to use phone numbers to make voice calls ## Available options +[!INCLUDE [Regional Availability Notice](../../includes/regional-availability-include.md)] + Azure Communication Services offers three types of Numbers: Toll-Free, Local, and Short Codes. 
- **To send or receive an SMS**, choose a Toll-Free Number or a Short Code @@ -27,7 +29,7 @@ The table below summarizes these number types with supported capabilities: | Type | Example | Send SMS | Receive SMS | Make Calls | Receive Calls | Typical Use Case | Restrictions | | :-------------------------------------------------------------------- | :---------------- | :------: | :---------: | :--------: | :-----------: | :------------------------------------------- | :------------- | -| [Toll-Free](../../quickstarts/telephony/get-phone-number.md) | +1 (8AB) XYZ PQRS | Yes | Yes | Yes | Yes | Receive calls on IVR bots, SMS Notifications | SMS in US only | +| [Toll-Free](../../quickstarts/telephony/get-phone-number.md) | +1 (8AB) XYZ PQRS | Yes | Yes | Yes | Yes | Receive calls on IVR bots, SMS Notifications | SMS in US and CA only | | [Local (Geographic)](../../quickstarts/telephony/get-phone-number.md) | +1 (ABC) XYZ PQRS | No | No | Yes | Yes | Geography Specific Number | Calling Only | | [Short-Codes](../../quickstarts/sms/apply-for-short-code.md) | ABC-XYZ | Yes | Yes | No | No | High-velocity SMS | SMS only | diff --git a/articles/communication-services/concepts/numbers/sub-eligibility-number-capability.md b/articles/communication-services/concepts/numbers/sub-eligibility-number-capability.md index 9a5f501bdae6..e739855bd9d1 100644 --- a/articles/communication-services/concepts/numbers/sub-eligibility-number-capability.md +++ b/articles/communication-services/concepts/numbers/sub-eligibility-number-capability.md @@ -10,6 +10,7 @@ ms.author: sadas ms.date: 03/04/2022 ms.topic: conceptual ms.service: azure-communication-services +ms.custom: references_regions --- # Subscription eligibility and number capabilities @@ -22,10 +23,12 @@ To acquire a phone number, you need to be on a paid Azure subscription. Phone nu Additional details on eligible subscription types are as follows: -| Number Type | Eligible Azure Agreement Type | -| :------------------------------- | :------------------------------------------------------------------------------------------------------- | -| Toll-Free and Local (Geographic) | Modern Customer Agreement (Field and Customer Led), Modern Partner Agreement (CSP), Enterprise Agreement | -| Short-Codes | Modern Customer Agreement (Field Led) and Enterprise Agreement Only | +| Number Type | Eligible Azure Agreement Type | +| :------------------------------- | :-------------------------------------------------------------------------------------------------------- | +| Toll-Free and Local (Geographic) | Modern Customer Agreement (Field and Customer Led), Modern Partner Agreement (CSP), Enterprise Agreement* | +| Short-Codes | Modern Customer Agreement (Field Led) and Enterprise Agreement Only | + +\* Allowing the purchase of Italian phone numbers for CSP and LSP customers is planned only for General Availability launch. 
## Number capabilities @@ -38,39 +41,90 @@ The tables below summarize current availability: | Number | Type | Send SMS | Receive SMS | Make Calls | Receive Calls | | :---------------- | :---------- | :------------------- | :------------------- | :------------------- | :--------------------- | | USA & Puerto Rico | Toll-Free | General Availability | General Availability | General Availability | General Availability\* | -| USA & Puerto Rico | Local | Not Available | Not Available | General Availability | General Availability\* | -| USA | Short-Codes | Public Preview | Public Preview\* | Not Available | Not Available | +| USA & Puerto Rico | Local | - | - | General Availability | General Availability\* | +| USA | Short-Codes | Public Preview | Public Preview\* | - | - | \* Available through Azure Bot Framework and Dynamics only ## Customers with UK Azure billing addresses +| Number | Type | Send SMS | Receive SMS | Make Calls | Receive Calls | +| :----------------- | :------------- | :------------------- | :------------------- | :--------------- | :--------------- | +| UK | Toll-Free | - | - | Public Preview | Public Preview\* | +| UK | Local | - | - | Public Preview | Public Preview\* | +| USA & Puerto Rico | Toll-Free | General Availability | General Availability | Public Preview | Public Preview\* | +| USA & Puerto Rico | Local | - | - | Public Preview | Public Preview\* | +| Canada | Toll-Free | Public Preview | Public Preview | Public Preview | Public Preview\* | +| Canada | Local | - | - | Public Preview | Public Preview\* | + +\* Available through Azure Bot Framework and Dynamics only + +## Customers with Ireland Azure billing addresses + +| Number | Type | Send SMS | Receive SMS | Make Calls | Receive Calls | +| :---------------- | :-------- | :------------------- | :------------------- | :------------------- | :--------------- | +| Ireland | Toll-Free | - | - | Public Preview | Public Preview\* | +| Ireland | Local | - | - | Public Preview | Public Preview\* | +| USA & Puerto Rico | Toll-Free | General Availability | General Availability | Public Preview | Public Preview\* | +| USA & Puerto Rico | Local | - | - | Public Preview | Public Preview\* | +| Canada | Toll-Free | Public Preview | Public Preview | Public Preview | Public Preview\* | +| Canada | Local | - | - | Public Preview | Public Preview\* | +| UK | Toll-Free | - | - | Public Preview | Public Preview\* | +| UK | Local | - | - | Public Preview | Public Preview\* | + + +\* Available through Azure Bot Framework and Dynamics only + +## Customers with Denmark Azure billing addresses + | Number | Type | Send SMS | Receive SMS | Make Calls | Receive Calls | | :---------------- | :-------- | :------------------- | :------------------- | :------------- | :--------------- | -| UK | Toll-Free | Not Available | Not Available | Public Preview | Public Preview\* | -| UK | Local | Not Available | Not Available | Public Preview | Public Preview\* | +| Denmark | Toll-Free | - | - | Public Preview | Public Preview\* | +| Denmark | Local | - | - | Public Preview | Public Preview\* | | USA & Puerto Rico | Toll-Free | General Availability | General Availability | Public Preview | Public Preview\* | -| USA & Puerto Rico | Local | Not Available | Not Available | Public Preview | Public Preview\* | +| USA & Puerto Rico | Local | - | - | Public Preview | Public Preview\* | +| Canada | Toll-Free | Public Preview | Public Preview | Public Preview | Public Preview\* | +| Canada | Local | - | - | Public Preview | Public Preview\* | +| UK | Toll-Free 
| - | - | Public Preview | Public Preview\* | +| UK | Local | - | - | Public Preview | Public Preview\* | \* Available through Azure Bot Framework and Dynamics only -## Customers with Ireland Azure billing addresses +## Customers with Canada Azure billing addresses + +| Number | Type | Send SMS | Receive SMS | Make Calls | Receive Calls | +| :---------------- | :-------- | :------------------- | :------------------- | :------------- | :--------------- | +| Canada | Toll-Free | Public Preview | Public Preview | Public Preview | Public Preview\* | +| Canada | Local | - | - | Public Preview | Public Preview\* | +| USA & Puerto Rico | Toll-Free | General Availability | General Availability | Public Preview | Public Preview\* | +| USA & Puerto Rico | Local | - | - | Public Preview | Public Preview\* | +| UK | Toll-Free | - | - | Public Preview | Public Preview\* | +| UK | Local | - | - | Public Preview | Public Preview\* | -| Number | Type | Send SMS | Receive SMS | Make Calls | Receive Calls | -| :---------------- | :-------- | :------------------- | :------------------- | :------------------- | :--------------------- | -| USA & Puerto Rico | Toll-Free | General Availability | General Availability | General Availability | General Availability\* | -| USA & Puerto Rico | Local | Not Available | Not Available | General Availability | General Availability\* | \* Available through Azure Bot Framework and Dynamics only -## Customers with Denmark Azure Billing Addresses +## Customers with Italy Azure billing addresses | Number | Type | Send SMS | Receive SMS | Make Calls | Receive Calls | | :------ | :-------- | :------------ | :------------ | :------------- | :--------------- | -| Denmark | Toll-Free | Not Available | Not Available | Public Preview | Public Preview\* | -| Denmark | Local | Not Available | Not Available | Public Preview | Public Preview\* | +| Italy | Toll-Free** | - | - | Public Preview | Public Preview\* | +| Italy | Local** | - | - | Public Preview | Public Preview\* | + +\* Available through Azure Bot Framework and Dynamics only + +\** Allowing the purchase of Italian phone numbers for CSP and LSP customers is planned only for General Availability launch. + +## Customers with Sweden Azure billing addresses + +| Number | Type | Send SMS | Receive SMS | Make Calls | Receive Calls | +| :---------------- | :-------- | :------------------- | :------------------- | :------------- | :--------------- | +| Sweden | Toll-Free | - | - | Public Preview | Public Preview\* | +| Sweden | Local | - | - | Public Preview | Public Preview\* | +| Canada | Toll-Free | Public Preview | Public Preview | Public Preview | Public Preview\* | +| Canada | Local | - | - | Public Preview | Public Preview\* | | USA & Puerto Rico | Toll-Free | General Availability | General Availability | Public Preview | Public Preview\* | -| USA & Puerto Rico | Local | Not Available | Not Available | Public Preview | Public Preview\* | +| USA & Puerto Rico | Local | - | - | Public Preview | Public Preview\* | \* Available through Azure Bot Framework and Dynamics only diff --git a/articles/communication-services/concepts/pricing.md b/articles/communication-services/concepts/pricing.md index d7e5db7a6882..c9efdcb30fad 100644 --- a/articles/communication-services/concepts/pricing.md +++ b/articles/communication-services/concepts/pricing.md @@ -190,13 +190,47 @@ Rose sees the messages and starts chatting. 
In the meanwhile Casey gets a call a - Number of messages sent (20 + 30 + 18 + 30 + 25 + 35) x $0.0008 = $0.1264 -## SMS (Short Messaging Service) and Telephony +## SMS (Short Messaging Service) -Please refer to the following links for details on SMS and Telephony pricing +Azure Communication Services allows for adding SMS messaging capabilities to your applications. You can embed the experience into your applications using JavaScript, Java, Python, or .NET SDKs. Refer to our [full list of available SDKs](./sdk-options.md). -- [SMS Pricing Details](./sms-pricing.md) -- [PSTN Pricing Details](./pstn-pricing.md) +### Pricing + +The SMS usage price is a per-message segment charge based on the destination of the message. The carrier surcharge is calculated based on the destination of the message for sent messages and based on the sender of the message for received messages. Please refer to the [SMS Pricing Page](./sms-pricing.md) for pricing details. + +### Pricing example: 1:1 SMS sending + +Contoso is a healthcare company with clinics in US and Canada. Contoso has a Patient Appointment Reminder application that sends out SMS appointment reminders to patients regarding upcoming appointments. + +- The application sends appointment reminders to 20 US patients and 30 Canada patients using a US toll-free number. +- Message length of the reminder message is 150 chars < 1 message segment*. Hence, total sent messages are 20 message segments for US and 30 message segments for CA. + +**Cost calculations** + +- US - 20 message segments x $0.0075 per sent message segment + 20 message segments x $0.0025 carrier surcharge per sent message segment = $0.20 +- CA - 30 message segments x $0.0075 per sent message segment + 30 message segments x $0.0085 carrier surcharge per sent message segment = $0.48 + +**Total cost for the appointment reminders for 20 US patients and 30 CA patients**: $0.20 + $0.48 = $0.68 +### Pricing example: 1:1 SMS receiving + +Contoso is a healthcare company with clinics in US and Canada. Contoso has a Patient Appointment Reminder application that sends out SMS appointment reminders to patients regarding upcoming appointments. Patients can respond to the messages with "Reschedule" and include their date/time preference to reschedule their appointments. + +- The application sends appointment reminders to 20 US patients and 30 Canada patients using a CA toll-free number. +- 6 US patients and 4 CA patients respond back to reschedule their appointments. Contoso receives 10 SMS responses in total. +- Message length of the reschedule messages is less than 1 message segment*. Hence, total messages received are 6 message segments for US and 4 message segments for CA. 
+ +**Cost calculations** + +- US - 6 message segments x $0.0075 per received message segment + 6 message segments x $0.0010 carrier surcharge per received message segment = $0.051 +- CA - 4 message segments x $0.0075 per received message segment = $0.03 + +**Total cost for receiving patient responses from 6 US patients and 4 CA patients**: $0.051 + $0.03 = $0.081 + +## Telephony +Please refer to the following links for details on Telephony pricing + +- [PSTN Pricing Details](./pstn-pricing.md) ## Next Steps Get started with Azure Communication Services: diff --git a/articles/communication-services/concepts/pstn-pricing.md b/articles/communication-services/concepts/pstn-pricing.md index b4a8ee9c9ce2..97608ec9d057 100644 --- a/articles/communication-services/concepts/pstn-pricing.md +++ b/articles/communication-services/concepts/pstn-pricing.md @@ -8,7 +8,7 @@ ms.date: 1/28/2022 ms.topic: conceptual ms.service: azure-communication-services --- -# Telephony (PSTN) Pricing +# Telephony (PSTN) pricing > [!IMPORTANT] > Number Retention and Portability: Phone numbers that are assigned to you during any preview program may need to be returned to Microsoft if you do not meet regulatory requirements before General Availability. During private preview and public preview, telephone numbers are not eligible for porting. [Details on offers in Public Preview / GA](../concepts/numbers/sub-eligibility-number-capability.md) @@ -19,15 +19,15 @@ In most cases, customers with Azure subscriptions locations that match the count All prices shown below are in USD. -## United States Telephony Offers +## United States telephony offers -### Phone Number Leasing Charges +### Phone number leasing charges |Number type |Monthly fee | |--------------|-----------| |Geographic |USD 1.00/mo | |Toll-Free |USD 2.00/mo | -### Usage Charges +### Usage charges |Number type |To make calls* |To receive calls| |--------------|-----------|------------| |Geographic |Starting at USD 0.0130/min |USD 0.0085/min | @@ -35,15 +35,15 @@ All prices shown below are in USD. \* For destination-specific pricing for making outbound calls, please refer to details [here](https://github.com/Azure/Communication/blob/master/pricing/communication-services-pstn-rates.csv) -## United Kingdom Telephony Offers +## United Kingdom telephony offers -### Phone Number Leasing Charges +### Phone number leasing charges |Number type |Monthly fee | |--------------|-----------| |Geographic |USD 1.00/mo | |Toll-Free |USD 2.00/mo | -### Usage Charges +### Usage charges |Number type |To make calls* |To receive calls| |--------------|-----------|------------| |Geographic |Starting at USD 0.0150/min |USD 0.0090/min | @@ -51,15 +51,15 @@ All prices shown below are in USD. \* For destination-specific pricing for making outbound calls, please refer to details [here](https://github.com/Azure/Communication/blob/master/pricing/communication-services-pstn-rates.csv) -## Denmark Telephony Offers +## Denmark telephony offers -### Phone Number Leasing Charges +### Phone number leasing charges |Number type |Monthly fee | |--------------|-----------| |Geographic |USD 0.82/mo | |Toll-Free |USD 25.00/mo | -### Usage Charges +### Usage charges |Number type |To make calls* |To receive calls| |--------------|-----------|------------| |Geographic |Starting at USD 0.0190/min |USD 0.0100/min | @@ -67,6 +67,70 @@ All prices shown below are in USD. 
\* For destination-specific pricing for making outbound calls, please refer to details [here](https://github.com/Azure/Communication/blob/master/pricing/communication-services-pstn-rates.csv) +## Canada telephony offers + +### Phone number leasing charges +|Number type |Monthly fee | +|--------------|-----------| +|Geographic |USD 1.00/mo | +|Toll-Free |USD 2.00/mo | + +### Usage charges +|Number type |To make calls* |To receive calls| +|--------------|-----------|------------| +|Geographic |Starting at USD 0.0130/min |USD 0.0085/min | +|Toll-free |Starting at USD 0.0130/min |USD 0.0220/min | + +\* For destination-specific pricing for making outbound calls, please refer to details [here](https://github.com/Azure/Communication/blob/master/pricing/communication-services-pstn-rates.csv) + +## Ireland telephony offers + +### Phone number leasing charges +|Number type |Monthly fee | +|--------------|-----------| +|Geographic |USD 1.50/mo | +|Toll-Free |USD 19.88/mo | + +### Usage charges +|Number type |To make calls* |To receive calls| +|--------------|-----------|------------| +|Geographic |Starting at USD 0.0160/min |USD 0.0100/min | +|Toll-free |Starting at USD 0.0160/min |Starting at USD 0.0448/min | + +\* For destination-specific pricing for making outbound calls, please refer to details [here](https://github.com/Azure/Communication/blob/master/pricing/communication-services-pstn-rates.csv) + +## Italy telephony offers + +### Phone number leasing charges +|Number type |Monthly fee | +|--------------|-----------| +|Geographic |USD 2.92/mo | +|Toll-Free |USD 23.39/mo | + +### Usage charges +|Number type |To make calls* |To receive calls| +|--------------|-----------|------------| +|Geographic |Starting at USD 0.0160/min |USD 0.0100/min | +|Toll-free |Starting at USD 0.0160/min |USD 0.3415/min | + +\* For destination-specific pricing for making outbound calls, please refer to details [here](https://github.com/Azure/Communication/blob/master/pricing/communication-services-pstn-rates.csv) + +## Sweden telephony offers + +### Phone number leasing charges +|Number type |Monthly fee | +|--------------|-----------| +|Geographic |USD 1.00/mo | +|Toll-Free |USD 21.05/mo | + +### Usage charges +|Number type |To make calls* |To receive calls| +|--------------|-----------|------------| +|Geographic |Starting at USD 0.0160/min |USD 0.0080/min | +|Toll-free |Starting at USD 0.0160/min |USD 0.1138/min | + +\* For destination-specific pricing for making outbound calls, please refer to details [here](https://github.com/Azure/Communication/blob/master/pricing/communication-services-pstn-rates.csv) + *** Note: Pricing for all countries is subject to change as pricing is market-based and depends on third-party suppliers of telephony services. Additionally, pricing may include requisite taxes and fees. diff --git a/articles/communication-services/concepts/reference.md b/articles/communication-services/concepts/reference.md index 0a2a88df41a2..c141b29e164f 100644 --- a/articles/communication-services/concepts/reference.md +++ b/articles/communication-services/concepts/reference.md @@ -24,6 +24,7 @@ For each area, we have external pages to track and review our SDKs. 
You can cons | Calling | [npm](https://www.npmjs.com/package/@azure/communication-calling) | - | - | - | [GitHub](https://github.com/Azure/Communication/releases) ([docs](/objectivec/communication-services/calling/)) | [Maven](https://search.maven.org/artifact/com.azure.android/azure-communication-calling/) | - | | Chat | [npm](https://www.npmjs.com/package/@azure/communication-chat) | [NuGet](https://www.nuget.org/packages/Azure.Communication.Chat) | [PyPi](https://pypi.org/project/azure-communication-chat/) | [Maven](https://search.maven.org/search?q=a:azure-communication-chat) | [GitHub](https://github.com/Azure/azure-sdk-for-ios/releases) | [Maven](https://search.maven.org/search?q=a:azure-communication-chat) | - | | Common | [npm](https://www.npmjs.com/package/@azure/communication-common) | [NuGet](https://www.nuget.org/packages/Azure.Communication.Common/) | N/A | [Maven](https://search.maven.org/search?q=a:azure-communication-common) | [GitHub](https://github.com/Azure/azure-sdk-for-ios/releases) | [Maven](https://search.maven.org/artifact/com.azure.android/azure-communication-common) | - | +| Email | [npm](https://www.npmjs.com/package/@azure/communication-email) | [NuGet](https://www.nuget.org/packages/Azure.Communication.Email) | - | - | - | - | - | | Identity | [npm](https://www.npmjs.com/package/@azure/communication-identity) | [NuGet](https://www.nuget.org/packages/Azure.Communication.Identity) | [PyPi](https://pypi.org/project/azure-communication-identity/) | [Maven](https://search.maven.org/search?q=a:azure-communication-identity) | - | - | - | | Network Traversal | [npm](https://www.npmjs.com/package/@azure/communication-network-traversal) | [NuGet](https://www.nuget.org/packages/Azure.Communication.NetworkTraversal) | [PyPi](https://pypi.org/project/azure-communication-networktraversal/) | [Maven](https://search.maven.org/search?q=a:azure-communication-networktraversal) | - | - | - | | Phone numbers | [npm](https://www.npmjs.com/package/@azure/communication-phone-numbers) | [NuGet](https://www.nuget.org/packages/Azure.Communication.phonenumbers) | [PyPi](https://pypi.org/project/azure-communication-phonenumbers/) | [Maven](https://search.maven.org/search?q=a:azure-communication-phonenumbers) | - | - | - | diff --git a/articles/communication-services/concepts/sms-pricing.md b/articles/communication-services/concepts/sms-pricing.md index 0272bca370a9..961f475c5ec9 100644 --- a/articles/communication-services/concepts/sms-pricing.md +++ b/articles/communication-services/concepts/sms-pricing.md @@ -12,7 +12,7 @@ zone_pivot_groups: acs-tollfree-shortcode # SMS Pricing > [!IMPORTANT] -> SMS messages can be sent to and received from United States phone numbers. Phone numbers located in other geographies are not yet supported by Communication Services SMS. +> SMS messages can be sent to and received from United States and Canada phone numbers. Phone numbers located in other geographies are not yet supported by Communication Services SMS. 
::: zone pivot="tollfree" [!INCLUDE [Toll-Free](./includes/sms-tollfree-pricing.md)] @@ -30,7 +30,7 @@ In this quickstart, you learned how to send SMS messages using Azure Communicati > [Learn more about SMS](../concepts/sms/concepts.md) The following documents may be interesting to you: -- Familiarize yourself with the [SMS SDK](../concepts/sms/sdk-features.md) +- Familiarize yourself with one of the [SMS SDKs](../concepts/sms/sdk-features.md) - Get an SMS capable [phone number](../quickstarts/telephony/get-phone-number.md) - Get a [short code](../quickstarts/sms/apply-for-short-code.md) - [Phone number types in Azure Communication Services](../concepts/telephony/plan-solution.md) diff --git a/articles/communication-services/concepts/sms/sms-faq.md b/articles/communication-services/concepts/sms/sms-faq.md index 88a52a99765d..aa7a3b8f7e29 100644 --- a/articles/communication-services/concepts/sms/sms-faq.md +++ b/articles/communication-services/concepts/sms/sms-faq.md @@ -108,7 +108,7 @@ Rate Limits for SMS: ## Carrier Fees ### What are the carrier fees for SMS? -In July 2021, US carriers started charging an added fee for SMS messages sent and/or received from toll-free numbers and short codes. Carrier fees for SMS are charged per message segment based on the destination. Azure Communication Services charges a standard carrier fee per message segment. Carrier fees are subject to change by mobile carriers. Please refer to [SMS pricing](../sms-pricing.md) for more details. +US and CA carriers charge an added fee for SMS messages sent and/or received from toll-free numbers and short codes. The carrier surcharge is calculated based on the destination of the message for sent messages and based on the sender of the message for received messages. Azure Communication Services charges a standard carrier fee per message segment. Carrier fees are subject to change by mobile carriers. Please refer to [SMS pricing](../sms-pricing.md) for more details. ### When will we come to know of changes to these surcharges? As with similar Azure services, customers will be notified at least 30 days prior to the implementation of any price changes. These charges will be reflected on our SMS pricing page along with the effective dates. diff --git a/articles/communication-services/concepts/voice-video-calling/calling-sdk-features.md b/articles/communication-services/concepts/voice-video-calling/calling-sdk-features.md index 7fed98c431f8..fb7cdae9c461 100644 --- a/articles/communication-services/concepts/voice-video-calling/calling-sdk-features.md +++ b/articles/communication-services/concepts/voice-video-calling/calling-sdk-features.md @@ -107,7 +107,7 @@ The maximum call duration is 30 hours, participants that reach the maximum call ## JavaScript Calling SDK support by OS and browser -The following table represents the set of supported browsers which are currently available. **We support the most recent three versions of the browser** unless otherwise indicated. +The following table represents the set of supported browsers which are currently available. **We support the most recent three major versions of the browser (most recent three minor versions for Safari)** unless otherwise indicated. 
| Platform | Chrome | Safari | Edge (Chromium) | | ------------ | ------ | ------ | -------------- | diff --git a/articles/communication-services/concepts/voice-video-calling/closed-captions.md b/articles/communication-services/concepts/voice-video-calling/closed-captions.md index 37daae907e94..09e0f9270272 100644 --- a/articles/communication-services/concepts/voice-video-calling/closed-captions.md +++ b/articles/communication-services/concepts/voice-video-calling/closed-captions.md @@ -38,7 +38,7 @@ Here are main scenarios where Closed Captions are useful: ## Availability -The private preview will be available on all platforms. +Closed Captions are supported in Private Preview only in ACS to ACS calls on all platforms. - Android - iOS - Web diff --git a/articles/communication-services/includes/regional-availability-include.md b/articles/communication-services/includes/regional-availability-include.md index 1539e8e28a7f..b845b0f0335d 100644 --- a/articles/communication-services/includes/regional-availability-include.md +++ b/articles/communication-services/includes/regional-availability-include.md @@ -8,4 +8,4 @@ ms.custom: references_regions --- > [!IMPORTANT] -> The capabilities available (PSTN/SMS, Inbound/Outbound) depend on the country that you're operating within (your Azure billing address location), your use case, and the phone number type that you've selected. These capabilities vary by country due to regulatory requirements. For more information, visit the [Phone number types](../concepts/numbers/number-types.md) documentation. +> The capabilities available (PSTN/SMS, Inbound/Outbound) depend on the country that you're operating within (your Azure billing address location), your use case, and the phone number type that you've selected. These capabilities vary by country due to regulatory requirements. For more information, visit the [Subscription eligibility](../concepts/numbers/sub-eligibility-number-capability.md) documentation. diff --git a/articles/communication-services/quickstarts/email/create-email-communication-resource.md b/articles/communication-services/quickstarts/email/create-email-communication-resource.md index 39e624410cf5..d47969ea88a1 100644 --- a/articles/communication-services/quickstarts/email/create-email-communication-resource.md +++ b/articles/communication-services/quickstarts/email/create-email-communication-resource.md @@ -16,7 +16,7 @@ ms.custom: private_preview, event-tier1-build-2022 [!INCLUDE [Public Preview Notice](../../includes/public-preview-include.md)] -Get started with Email by provisioning your first Email Communication Services resource. Communication services resources can be provisioned through the [Azure portal](https://portal.azure.com) or with the .NET management client library. The management client library and the Azure portal allow you to create, configure, update and delete your resources and interface with [Azure Resource Manager](../../../azure-resource-manager/management/overview.md), Azure's deployment and management service. All functionality available in the client libraries is available in the Azure portal. +Get started with Email by provisioning your first Email Communication Services resource. Communication services resources can be provisioned through the [Azure portal](https://portal.azure.com/) or with the .NET management client library. 
The management client library and the Azure portal allow you to create, configure, update and delete your resources and interface with [Azure Resource Manager](../../../azure-resource-manager/management/overview.md), Azure's deployment and management service. All functionality available in the client libraries is available in the Azure portal. ## Create the Email Communications Service resource using portal diff --git a/articles/communication-services/quickstarts/email/includes/send-email-js.md b/articles/communication-services/quickstarts/email/includes/send-email-js.md index 1d1b4ae55392..efda25eef026 100644 --- a/articles/communication-services/quickstarts/email/includes/send-email-js.md +++ b/articles/communication-services/quickstarts/email/includes/send-email-js.md @@ -11,7 +11,7 @@ ms.service: azure-communication-services ms.custom: private_preview, event-tier1-build-2022 --- -Get started with Azure Communication Services by using the Communication Services C# Email client library to send Email messages. +Get started with Azure Communication Services by using the Communication Services JS Email client library to send Email messages. Completing this quick start incurs a small cost of a few USD cents or less in your Azure account. @@ -42,7 +42,7 @@ Run `npm init -y` to create a **package.json** file with default settings. npm init -y ``` -Use a text editor to create a file called **send-email.js** in the project root directory. You'll add all the source code for this quickstart to this file in the following sections. +Use a text editor to create a file called **send-email.js** in the project root directory. Change the "main" property in **package.json** to "send-email.js". You'll add all the source code for this quickstart to this file in the following sections. ### Install the package Use the `npm install` command to install the Azure Communication Services Email client library for JavaScript. @@ -59,27 +59,30 @@ The following classes and interfaces handle some of the major features of the Az | Name | Description | | --------------------| -----------------------------------------------------------------------------------------------------------------------------------------------------| -| EmailAddress | This class contains an email address and an option for a display name. | -| EmailAttachment | This class creates an email attachment by accepting a unique ID, email attachment type, and a string of content bytes. | +| EmailAddress | This interface contains an email address and an option for a display name. | +| EmailAttachment | This interface creates an email attachment by accepting a unique ID, email attachment type, and a string of content bytes. | | EmailClient | This class is needed for all email functionality. You instantiate it with your connection string and use it to send email messages. | -| EmailClientOptions | This class can be added to the EmailClient instantiation to target a specific API version. | -| EmailContent | This class contains the subject and the body of the email message. The importance can also be set within the EmailContent class. | -| EmailCustomHeader | This class allows for the addition of a name and value pair for a custom header. | -| EmailMessage | This class combines the sender, content, and recipients. Custom headers, attachments, and reply-to email addresses can optionally be added, as well. | -| EmailRecipients | This class holds lists of EmailAddress objects for recipients of the email message, including optional lists for CC & BCC recipients. 
| -| SendStatusResult | This class holds lists of status of the email message delivery. +| EmailClientOptions | This interface can be added to the EmailClient instantiation to target a specific API version. | +| EmailContent | This interface contains the subject, plaintext, and html of the email message. | +| EmailCustomHeader | This interface allows for the addition of a name and value pair for a custom header. | +| EmailMessage | This interface combines the sender, content, and recipients. Custom headers, importance, attachments, and reply-to email addresses can optionally be added, as well. | +| EmailRecipients | This interface holds lists of EmailAddress objects for recipients of the email message, including optional lists for CC & BCC recipients. | +| SendStatusResult | This interface holds the messageId and status of the email message delivery. ## Authenticate the client - Import the **EmailClient** from the client library and instantiate it with your connection string. The code below retrieves the connection string for the resource from an environment variable named `COMMUNICATION_SERVICES_CONNECTION_STRING`. Learn how to [manage your resource's connection string](../../create-communication-resource.md#store-your-connection-string). +Import the **EmailClient** from the client library and instantiate it with your connection string. + +The code below retrieves the connection string for the resource from an environment variable named `COMMUNICATION_SERVICES_CONNECTION_STRING` using the dotenv package. Use the `npm install` command to install the dotenv package. Learn how to [manage your resource's connection string](../../create-communication-resource.md#store-your-connection-string). + +```console +npm install dotenv +``` Add the following code to **send-email.js**: ```javascript -const { EmailRestApiClient } = require("@azure/communication-email"); -const communication_common = require("@azure/communication-common"); -const core_http = require("@azure/core-http"); -const uuid = require("uuid"); +const { EmailClient } = require("@azure/communication-email"); require("dotenv").config(); // This code demonstrates how to fetch your connection string @@ -93,7 +96,7 @@ To send an Email message, you need to - Add Recipients - Construct your email message with your Sender information you get your MailFrom address from your verified domain. 
- Include your Email Content and Recipients and include attachments if any -- Calling the SendEmail method: +- Calling the send method: Replace with your domain details and modify the content, recipient details as required @@ -101,37 +104,23 @@ Replace with your domain details and modify the content, recipient details as re async function main() { try { - const { url, credential } = communication_common.parseClientArguments(connectionString); - const options = {}; - options.userAgentOptions = {}; - options.userAgentOptions.userAgentPrefix = `azsdk-js-communication-email/1.0.0`; - const authPolicy = communication_common.createCommunicationAuthPolicy(credential); - const pipeline = core_http.createPipelineFromOptions(options, authPolicy); - this.api = new EmailRestApiClient(url, pipeline); + var client = new EmailClient(connectionString); //send mail - const unique_id = uuid.v4(); - const repeatabilityFirstSent = new Date().toUTCString(); const emailMessage = { sender: "", content: { subject: "Welcome to Azure Communication Service Email.", - body: { - plainText: "" - }, + plainText: "" }, recipients: { - toRecipients: [ + to: [ { - email: "emailalias@emaildomain.com>", + email: "", }, ], }, }; - var response = await this.api.email.sendEmail( - unique_id, - repeatabilityFirstSent, - emailMessage - ); + var response = await client.send(emailMessage); } catch (e) { console.log(e); } @@ -140,11 +129,10 @@ main(); ``` ## Getting MessageId to track email delivery -To track the status of email delivery, you need to get the MessageId back from response and track the status. If there's no MessageId retry the request. +To track the status of email delivery, you need to get the MessageId back from response and track the status. If there's no MessageId, retry the request. ```javascript - // check mail status, wait for 5 seconds, check for 60 seconds. - const messageId = response._response.parsedHeaders.xMsRequestId; + const messageId = response.messageId; if (messageId === null) { console.log("Message Id not found."); return; @@ -154,13 +142,12 @@ To track the status of email delivery, you need to get the MessageId back from r ## Getting status on email delivery To get the delivery status of email call GetMessageStatus API with MessageId ```javascript - - const context = this; + // check mail status, wait for 5 seconds, check for 60 seconds. let counter = 0; const statusInterval = setInterval(async function () { counter++; try { - const response = await context.api.email.getSendStatus(messageId); + const response = await client.getSendStatus(messageId); if (response) { console.log(`Email status for ${messageId}: ${response.status}`); if (response.status.toLowerCase() !== "queued" || counter > 12) { @@ -176,14 +163,9 @@ To get the delivery status of email call GetMessageStatus API with MessageId | Status Name | Description | | --------------------| -----------------------------------------------------------------------------------------------------------------------------------------------------| -| None | An email with this messageId couldn't be found. | | Queued | The email has been placed in the queue for delivery. | | OutForDelivery | The email is currently en route to its recipient(s). | -| InternalError | An error occurred internally during the delivery of this message. Try again. | | Dropped | The email message was dropped before the delivery could be successfully completed. | -| InvalidEmailAddress | The sender and/or recipient email address(es) is/are not valid. 
| -| InvalidAttachments | The content bytes string for the attachment isn't valid. | -| InvalidSenderDomain | The sender's email address domain isn't valid. | ## Run the code @@ -194,4 +176,4 @@ node ./send-email.js ``` ## Sample code -You can download the sample app from [GitHub](https://github.com/moirf/communication-services-javascript-quickstarts/tree/main/send-email) +You can download the sample app from [GitHub](https://github.com/Azure-Samples/communication-services-javascript-quickstarts/tree/main/send-email) diff --git a/articles/communication-services/quickstarts/email/includes/send-email-net.md b/articles/communication-services/quickstarts/email/includes/send-email-net.md index 39ddf326ddca..7044a034de00 100644 --- a/articles/communication-services/quickstarts/email/includes/send-email-net.md +++ b/articles/communication-services/quickstarts/email/includes/send-email-net.md @@ -90,17 +90,17 @@ The following classes and interfaces handle some of the major features of the Az | EmailCustomHeader | This class allows for the addition of a name and value pair for a custom header. | | EmailMessage | This class combines the sender, content, and recipients. Custom headers, attachments, and reply-to email addresses can optionally be added, as well. | | EmailRecipients | This class holds lists of EmailAddress objects for recipients of the email message, including optional lists for CC & BCC recipients. | -| SendStatusResult | This class holds lists of status of the email message delivery . | +| SendStatusResult | This class holds lists of status of the email message delivery. | ## Authenticate the client - Open **Program.cs** in a text editor and replace the body of the `Main` method with code to initialize an `EmailClient` with your connection string. The code below retrieves the connection string for the resource from an environment variable named `COMMUNICATION_SERVICES_CONNECTION_STRING`. Learn how to [manage you resource's connection string](../../create-communication-resource.md#store-your-connection-string). + Open **Program.cs** in a text editor and replace the body of the `Main` method with code to initialize an `EmailClient` with your connection string. The code below retrieves the connection string for the resource from an environment variable named `COMMUNICATION_SERVICES_CONNECTION_STRING`. Learn how to [manage your resource's connection string](../../create-communication-resource.md#store-your-connection-string). ```csharp // This code demonstrates how to fetch your connection string // from an environment variable. string connectionString = Environment.GetEnvironmentVariable("COMMUNICATION_SERVICES_CONNECTION_STRING"); - +EmailClient emailClient = new EmailClient(connectionString); ``` ## Send an email message diff --git a/articles/communication-services/quickstarts/sms/handle-sms-events.md b/articles/communication-services/quickstarts/sms/handle-sms-events.md index c40827908402..206c73d17f5c 100644 --- a/articles/communication-services/quickstarts/sms/handle-sms-events.md +++ b/articles/communication-services/quickstarts/sms/handle-sms-events.md @@ -1,100 +1,110 @@ --- -title: Quickstart - Handle SMS events for Delivery Reports and Inbound Messages -titleSuffix: An Azure Communication Services quickstart -description: Learn how to handle SMS events using Azure Communication Services. 
+title: Quickstart - Handle SMS and delivery report events +titleSuffix: Azure Communication Services +description: "In this quickstart, you'll learn how to handle Azure Communication Services events. See how to create, receive, and subscribe to SMS and delivery report events." author: probableprime manager: chpalm services: azure-communication-services ms.author: rifox -ms.date: 06/30/2021 +ms.date: 05/25/2022 ms.topic: quickstart ms.service: azure-communication-services ms.subservice: sms -ms.custom: mode-other +ms.custom: + - mode-other + - kr2b-contr-experiment --- -# Quickstart: Handle SMS events for Delivery Reports and Inbound Messages +# Quickstart: Handle SMS and delivery report events + +Get started with Azure Communication Services by using Azure Event Grid to handle Communication Services SMS events. After subscribing to SMS events such as inbound messages and delivery reports, you generate and receive these events. Completing this quickstart incurs a small cost of a few USD cents or less in your Azure account. [!INCLUDE [Regional Availability Notice](../../includes/regional-availability-include.md)] -Get started with Azure Communication Services by using Azure Event Grid to handle Communication Services SMS events. +## Prerequisites -## About Azure Event Grid +- An Azure account with an active subscription. [Create an account for free](https://azure.microsoft.com/free/?WT.mc_id=A261C142F). +- A Communication Services resource. For detailed information, see [Create an Azure Communication Services resource](../create-communication-resource.md). +- An SMS-enabled telephone number. [Get a phone number](../telephony/get-phone-number.md). -[Azure Event Grid](../../../event-grid/overview.md) is a cloud-based eventing service. In this article, you'll learn how to subscribe to events for [communication service events](../../../event-grid/event-schema-communication-services.md), and trigger an event to view the result. Typically, you send events to an endpoint that processes the event data and takes actions. In this article, we'll send the events to a web app that collects and displays the messages. +## About Event Grid -## Prerequisites -- An Azure account with an active subscription. [Create an account for free](https://azure.microsoft.com/free/?WT.mc_id=A261C142F). -- An Azure Communication Service resource. Further details can be found in the [Create an Azure Communication Services resource](../create-communication-resource.md) quickstart. -- An SMS enabled telephone number. [Get a phone number](../telephony/get-phone-number.md). +[Event Grid](../../../event-grid/overview.md) is a cloud-based eventing service. In this article, you'll learn how to subscribe to [communication service events](../../../event-grid/event-schema-communication-services.md), and trigger an event to view the result. Typically, you send events to an endpoint that processes the event data and takes actions. In this article, we'll send the events to a web app that collects and displays the messages. + +## Set up the environment + +To set up the environment that we'll use to generate and receive events, take the steps in the following sections. + +### Register an Event Grid resource provider -## Setting up +If you haven't previously used Event Grid in your Azure subscription, you might need to register your Event Grid resource provider. To register the provider, follow these steps: -### Enable Event Grid resource provider +1. Go to the Azure portal. +1. On the left menu, select **Subscriptions**. +1. 
Select the subscription that you use for Event Grid. +1. On the left menu, under **Settings**, select **Resource providers**. +1. Find **Microsoft.EventGrid**. +1. If your resource provider isn't registered, select **Register**. -If you haven't previously used Event Grid in your Azure subscription, you may need to register the Event Grid resource provider following the steps below: +It might take a moment for the registration to finish. Select **Refresh** to update the status. When **Registered** appears under **Status**, you're ready to continue. -In the Azure portal: +### Deploy the Event Grid viewer -1. Select **Subscriptions** on the left menu. -2. Select the subscription you're using for Event Grid. -3. On the left menu, under **Settings**, select **Resource providers**. -4. Find **Microsoft.EventGrid**. -5. If not registered, select **Register**. +For this quickstart, we'll use an Event Grid viewer to view events in near-real time. The viewer provides the user with the experience of a real-time feed. Also, the payload of each event should be available for inspection. -It may take a moment for the registration to finish. Select **Refresh** to update the status. When **Status** is **Registered**, you're ready to continue. +To set up the viewer, follow the steps in [Azure Event Grid Viewer](/samples/azure-samples/azure-event-grid-viewer/azure-event-grid-viewer/). -### Event Grid Viewer deployment +## Subscribe to SMS events by using web hooks -For this quickstart, we will use the [Azure Event Grid Viewer Sample](/samples/azure-samples/azure-event-grid-viewer/azure-event-grid-viewer/) to view events in near-real time. This will provide the user with the experience of a real-time feed. In addition, the payload of each event should be available for inspection as well. +You can subscribe to specific events to provide Event Grid with information about where to send the events that you want to track. -## Subscribe to the SMS events using web hooks +1. In the portal, go to the Communication Services resource that you created. -In the portal, navigate to your Azure Communication Services Resource that you created. Inside the Communication Service resource, select **Events** from the left menu of the **Communication Services** page. +1. Inside the Communication Services resource, on the left menu of the **Communication Services** page, select **Events**. -:::image type="content" source="./media/handle-sms-events/select-events.png" alt-text="Screenshot showing selecting the event subscription button within a resource's events page."::: +1. Select **Add Event Subscription**. -Press **Add Event Subscription** to enter the creation wizard. + :::image type="content" source="./media/handle-sms-events/select-events.png" alt-text="Screenshot that shows the Events page of an Azure Communication Services resource. The Event Subscription button is called out."::: -On the **Create Event Subscription** page, Enter a **name** for the event subscription. +1. On the **Create Event Subscription** page, enter a **name** for the event subscription. -You can subscribe to specific events to tell Event Grid which of the SMS events you want to track, and where to send the events. Select the events you'd like to subscribe to from the dropdown menu. For SMS you'll have the option to choose `SMS Received` and `SMS Delivery Report Received`. +1. Under **Event Types**, select the events that you'd like to subscribe to. For SMS, you can choose `SMS Received` and `SMS Delivery Report Received`. 
-If you're prompted to provide a **System Topic Name**, feel free to provide a unique string. This field has no impact on your experience and is used for internal telemetry purposes. +1. If you're prompted to provide a **System Topic Name**, feel free to provide a unique string. This field has no impact on your experience and is used for internal telemetry purposes. -Check out the full list of [events supported by Azure Communication Services](../../../event-grid/event-schema-communication-services.md). + :::image type="content" source="./media/handle-sms-events/select-events-create-eventsub.png" alt-text="Screenshot that shows the Create Event Subscription dialog. Under Event Types, SMS Received and SMS Delivery Report Received are selected."::: -:::image type="content" source="./media/handle-sms-events/select-events-create-eventsub.png" alt-text="Screenshot showing the SMS Received and SMS Delivery Report Received event types being selected."::: +1. For **Endpoint type**, select **Web Hook**. -Select **Web Hook** for **Endpoint type**. + :::image type="content" source="./media/handle-sms-events/select-events-create-linkwebhook.png" alt-text="Screenshot that shows a detail of the Create Event Subscription dialog. In the Endpoint Type list, Web Hook is selected."::: -:::image type="content" source="./media/handle-sms-events/select-events-create-linkwebhook.png" alt-text="Screenshot showing the Endpoint Type field being set to Web Hook."::: +1. For **Endpoint**, select **Select an endpoint**, and then enter the URL of your web app. -For **Endpoint**, click **Select an endpoint**, and enter the URL of your web app. + In this case, we'll use the URL from the [Event Grid viewer](/samples/azure-samples/azure-event-grid-viewer/azure-event-grid-viewer/) that we set up earlier in the quickstart. The URL for the sample has this format: `https://{{site-name}}.azurewebsites.net/api/updates` -In this case, we will use the URL from the [Azure Event Grid Viewer Sample](/samples/azure-samples/azure-event-grid-viewer/azure-event-grid-viewer/) we set up earlier in the quickstart. The URL for the sample will be in the format: `https://{{site-name}}.azurewebsites.net/api/updates` +1. Select **Confirm Selection**. -Then select **Confirm Selection**. + :::image type="content" source="./media/handle-sms-events/select-events-create-selectwebhook-epadd.png" alt-text="Screenshot that shows the Select Web Hook dialog. The Subscriber Endpoint box contains a U R L, and a Confirm Selection button is visible."::: -:::image type="content" source="./media/handle-sms-events/select-events-create-selectwebhook-epadd.png" alt-text="Screenshot showing confirming a Web Hook Endpoint."::: +## View SMS events -## Viewing SMS events +To generate and receive SMS events, take the steps in the following sections. -### Triggering SMS events +### Trigger SMS events -To view event triggers, we must generate events in the first place. +To view event triggers, we need to generate some events. -- `SMS Received` events are generated when the Communication Services phone number receives a text message. To trigger an event, just send a message from your phone to the phone number attached to your Communication Services resource. -- `SMS Delivery Report Received` events are generated when you send an SMS to a user using a Communication Services phone number. To trigger an event, you are required to enable `Delivery Report` in the options of the [sent SMS](../sms/send.md). Try sending a message to your phone with `Delivery Report`. 
Completing this action incurs a small cost of a few USD cents or less in your Azure account. +- `SMS Received` events are generated when the Communication Services phone number receives a text message. To trigger an event, send a message from your phone to the phone number that's attached to your Communication Services resource. +- `SMS Delivery Report Received` events are generated when you send an SMS to a user by using a Communication Services phone number. To trigger an event, you need to turn on the `Delivery Report` option of the [SMS that you send](../sms/send.md). Try sending a message to your phone with `Delivery Report` turned on. Completing this action incurs a small cost of a few USD cents or less in your Azure account. -Check out the full list of [events supported by Azure Communication Services](../../../event-grid/event-schema-communication-services.md). +Check out the full list of [events that Communication Services supports](../../../event-grid/event-schema-communication-services.md). -### Receiving SMS events +### Receive SMS events -Once you complete either action above you will notice that `SMS Received` and `SMS Delivery Report Received` events are sent to your endpoint. These events will show up in the [Azure Event Grid Viewer Sample](/samples/azure-samples/azure-event-grid-viewer/azure-event-grid-viewer/) we set up at the beginning. You can press the eye icon next to the event to see the entire payload. Events will look like this: +After you generate an event, you'll notice that `SMS Received` and `SMS Delivery Report Received` events are sent to your endpoint. These events show up in the [Event Grid viewer](/samples/azure-samples/azure-event-grid-viewer/azure-event-grid-viewer/) that we set up at the beginning of this quickstart. Select the eye icon next to the event to see the entire payload. Events should look similar to the following data: -:::image type="content" source="./media/handle-sms-events/sms-received.png" alt-text="Screenshot showing the Event Grid Schema for an SMS Received Event."::: +:::image type="content" source="./media/handle-sms-events/sms-received.png" alt-text="Screenshot of the Azure Event Grid viewer that shows the Event Grid schema for an SMS received event."::: -:::image type="content" source="./media/handle-sms-events/sms-delivery-report-received.png" alt-text="Screenshot showing the Event Grid Schema for an SMS Delivery Report Event."::: +:::image type="content" source="./media/handle-sms-events/sms-delivery-report-received.png" alt-text="Screenshot of the Azure Event Grid viewer that shows the Event Grid schema for an SMS delivery report event."::: Learn more about the [event schemas and other eventing concepts](../../../event-grid/event-schema-communication-services.md). @@ -109,7 +119,7 @@ In this quickstart, you learned how to consume SMS events. 
You can receive SMS m > [!div class="nextstepaction"] > [Send SMS](../sms/send.md) -You may also want to: +You might also want to: - [Learn about event handling concepts](../../../event-grid/event-schema-communication-services.md) - [Learn about Event Grid](../../../event-grid/overview.md) diff --git a/articles/communication-services/quickstarts/sms/includes/send-sms-java.md b/articles/communication-services/quickstarts/sms/includes/send-sms-java.md index fc28cc58107c..07e39346a26d 100644 --- a/articles/communication-services/quickstarts/sms/includes/send-sms-java.md +++ b/articles/communication-services/quickstarts/sms/includes/send-sms-java.md @@ -7,7 +7,7 @@ manager: ankita ms.service: azure-communication-services ms.subservice: azure-communication-services -ms.date: 06/30/2021 +ms.date: 05/25/2022 ms.topic: include ms.custom: include file ms.author: pvicencio @@ -18,32 +18,34 @@ Get started with Azure Communication Services by using the Communication Service Completing this quickstart incurs a small cost of a few USD cents or less in your Azure account. > [!NOTE] -> Find the finalized code for this quickstart on [GitHub](https://github.com/Azure-Samples/communication-services-java-quickstarts/tree/main/send-sms-quickstart) +> Find the finalized code for this quickstart on [GitHub](https://github.com/Azure-Samples/communication-services-java-quickstarts/tree/main/send-sms-quickstart). ## Prerequisites - An Azure account with an active subscription. [Create an account for free](https://azure.microsoft.com/free/?WT.mc_id=A261C142F). -- [Java Development Kit (JDK)](/java/azure/jdk/) version 8 or above. +- [Java Development Kit (JDK)](/java/azure/jdk/) version 8 or later. - [Apache Maven](https://maven.apache.org/download.cgi). - An active Communication Services resource and connection string. [Create a Communication Services resource](../../create-communication-resource.md). -- An SMS enabled telephone number. [Get a phone number](../../telephony/get-phone-number.md). +- An SMS-enabled telephone number. [Get a phone number](../../telephony/get-phone-number.md). ### Prerequisite check -- In a terminal or command window, run `mvn -v` to check that maven is installed. -- To view the phone numbers associated with your Communication Services resource, sign in to the [Azure portal](https://portal.azure.com/), locate your Communication Services resource and open the **phone numbers** tab from the left navigation pane. +- In a terminal or command window, run `mvn -v` to check that Maven is installed. +- To view the phone numbers that are associated with your Communication Services resource, sign in to the [Azure portal](https://portal.azure.com/) and locate your Communication Services resource. In the navigation pane on the left, select **Phone numbers**. -## Setting up +## Set up the application environment + +To set up an environment for sending messages, take the steps in the following sections. ### Create a new Java application -Open your terminal or command window and navigate to the directory where you would like to create your Java application. Run the command below to generate the Java project from the maven-archetype-quickstart template. +Open your terminal or command window and navigate to the directory where you would like to create your Java application. Run the following command to generate the Java project from the maven-archetype-quickstart template. 
```console mvn archetype:generate -DgroupId=com.communication.quickstart -DartifactId=communication-quickstart -DarchetypeArtifactId=maven-archetype-quickstart -DarchetypeVersion=1.4 -DinteractiveMode=false ``` -The 'generate' goal will create a directory with the same name as the artifactId. Under this directory, the **src/main/java** directory contains the project source code, the **src/test/java directory** contains the test source, and the **pom.xml** file is the project's Project Object Model, or POM. +The `generate` goal creates a directory with the same name as the `artifactId` value. Under this directory, the **src/main/java** directory contains the project source code, the **src/test/java directory** contains the test source, and the **pom.xml** file is the project's Project Object Model (POM). ### Install the package @@ -59,7 +61,7 @@ Open the **pom.xml** file in your text editor. Add the following dependency elem ### Set up the app framework -Open **/src/main/java/com/communication/quickstart/App.java** in a text editor, add import directives and remove the `System.out.println("Hello world!");` statement: +Open **/src/main/java/com/communication/quickstart/App.java** in a text editor, add import directives, and remove the `System.out.println("Hello world!");` statement: ```java package com.communication.quickstart; @@ -74,7 +76,7 @@ public class App { public static void main( String[] args ) { - // Quickstart code goes here + // Quickstart code goes here. } } @@ -86,19 +88,19 @@ The following classes and interfaces handle some of the major features of the Az | Name | Description | | ---------------------------------------------------------------- | ----------------------------------------------------------------------------------------------- | -| SmsClientBuilder | This class creates the SmsClient. You provide it with endpoint, credential, and an http client. | +| SmsClientBuilder | This class creates the SmsClient. You provide it with an endpoint, a credential, and an HTTP client. | | SmsClient | This class is needed for all SMS functionality. You use it to send SMS messages. | -| SmsSendOptions | This class provides options to add custom tags and configure delivery reporting. If deliveryReportEnabled is set to true, then an event will be emitted when delivery was successful | +| SmsSendOptions | This class provides options to add custom tags and configure delivery reporting. If deliveryReportEnabled is set to true, an event is emitted when delivery is successful. | | SmsSendResult | This class contains the result from the SMS service. | ## Authenticate the client -Instantiate an `SmsClient` with your connection string. (Credential is the `Key` from the Azure portal. Learn how to [manage your resource's connection string](../../create-communication-resource.md#store-your-connection-string). In addition, you can initialize the client with any custom HTTP client the implements the `com.azure.core.http.HttpClient` interface. +To authenticate a client, you instantiate an `SmsClient` with your connection string. For the credential, use the `Key` from the Azure portal. Learn how to [manage your resource's connection string](../../create-communication-resource.md#store-your-connection-string). You can also initialize the client with any custom HTTP client that implements the `com.azure.core.http.HttpClient` interface. 
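To illustrate the custom HTTP client option mentioned above, here's a minimal sketch that passes an `HttpClient` implementation to `SmsClientBuilder`. It assumes the `azure-core-http-netty` package is on the classpath; `NettyAsyncHttpClientBuilder` is used only as one example of a `com.azure.core.http.HttpClient` implementation.

```java
// A sketch of supplying a custom com.azure.core.http.HttpClient to the builder.
// Assumes azure-core-http-netty is available; any HttpClient implementation works.
import com.azure.communication.sms.SmsClient;
import com.azure.communication.sms.SmsClientBuilder;
import com.azure.core.http.HttpClient;
import com.azure.core.http.netty.NettyAsyncHttpClientBuilder;

public class CustomHttpClientSketch {
    public static void main(String[] args) {
        // Build (or reuse) an HttpClient implementation to hand to the builder.
        HttpClient httpClient = new NettyAsyncHttpClientBuilder().build();

        SmsClient smsClient = new SmsClientBuilder()
            .connectionString(System.getenv("COMMUNICATION_SERVICES_CONNECTION_STRING"))
            .httpClient(httpClient) // replaces the default HTTP client
            .buildClient();
    }
}
```

Any implementation of the interface can be supplied the same way, for example to reuse a shared client across SDK clients.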
-Add the following code to the `main` method: +To instantiate a client, add the following code to the `main` method: ```java -// You can find your endpoint and access key from your resource in the Azure portal +// You can get your endpoint and access key from your resource in the Azure portal. String endpoint = "https://.communication.azure.com/"; AzureKeyCredential azureKeyCredential = new AzureKeyCredential(""); @@ -108,9 +110,9 @@ SmsClient smsClient = new SmsClientBuilder() .buildClient(); ``` -You can also provide the entire connection string using the connectionString() function instead of providing the endpoint and access key. +You can also provide the entire connection string by using the `connectionString` function instead of providing the endpoint and access key. ```java -// You can find your connection string from your resource in the Azure portal +// You can get your connection string from your resource in the Azure portal. String connectionString = "endpoint=https://.communication.azure.com/;accesskey="; SmsClient smsClient = new SmsClientBuilder() @@ -120,7 +122,7 @@ SmsClient smsClient = new SmsClientBuilder() ## Send a 1:1 SMS message -To send an SMS message to a single recipient, call the `send` method from the SmsClient with a single recipient phone number. You may also pass in optional parameters to specify whether the delivery report should be enabled and to set custom tags. +To send an SMS message to a single recipient, call the `send` method from the SmsClient with a single recipient phone number. You can also provide optional parameters to specify whether the delivery report should be enabled and to set custom tags. ```java SmsSendResult sendResult = smsClient.send( @@ -133,13 +135,17 @@ System.out.println("Recipient Number: " + sendResult.getTo()); System.out.println("Send Result Successful:" + sendResult.isSuccessful()); ``` -You should replace `` with an SMS enabled phone number associated with your Communication Services resource and `` with a phone number you wish to send a message to. +Make these replacements in the code: + +- Replace `` with an SMS-enabled phone number that's associated with your Communication Services resource. +- Replace `` with a phone number that you'd like to send a message to. > [!WARNING] -> Note that phone numbers should be provided in E.164 international standard format (e.g.: +14255550123). The **From** phone number may be a Short Code as well (e.g.: 23456). +> Provide phone numbers in E.164 international standard format, for example, +14255550123. The value for `` can also be a short code, for example, 23456. ## Send a 1:N SMS message with options -To send an SMS message to a list of recipients, call the `send` method with a list of recipient phone numbers. You may also pass in optional parameters to specify whether the delivery report should be enabled and to set custom tags. + +To send an SMS message to a list of recipients, call the `send` method with a list of recipient phone numbers. You can also provide optional parameters to specify whether the delivery report should be enabled and to set custom tags. ```java SmsSendOptions options = new SmsSendOptions(); options.setDeliveryReportEnabled(true); @@ -159,37 +165,40 @@ for (SmsSendResult result : sendResults) { } ``` -You should replace `` with an SMS enabled phone number associated with your Communication Services resource and `` and `` with phone number(s) you wish to send a message to. 
+Make these replacements in the code: + +- Replace `` with an SMS-enabled phone number that's associated with your Communication Services resource +- Replace `` and `` with phone numbers that you'd like to send a message to. > [!WARNING] -> Note that phone numbers should be provided in E.164 international standard format (e.g.: +14255550123). The **From** phone number may be a Short Code as well (e.g.: 23456). +> Provide phone numbers in E.164 international standard format, for example, +14255550123. The value for `` can also be a short code, for example, 23456. -The `setDeliveryReportEnabled` method is used to configure Delivery Reporting. This is useful for scenarios where you want to emit events when SMS messages are delivered. See the [Handle SMS Events](../handle-sms-events.md) quickstart to configure Delivery Reporting for your SMS messages. +The `setDeliveryReportEnabled` method is used to configure delivery reporting. This functionality is useful when you want to emit events when SMS messages are delivered. See the [Handle SMS Events](../handle-sms-events.md) quickstart to configure delivery reporting for your SMS messages. -The `setTag` method is used to apply a tag to the Delivery Report. +You can use the `setTag` method to apply a tag to the delivery report. ## Run the code -Navigate to the directory containing the **pom.xml** file and compile the project using the `mvn` command. +1. Navigate to the directory that contains the **pom.xml** file and compile the project by using the `mvn` command. -```console + ```console -mvn compile + mvn compile -``` + ``` -Then, build the package. +1. Build the package. -```console + ```console -mvn package + mvn package -``` + ``` -Run the following `mvn` command to execute the app. +1. Run the following `mvn` command to execute the app. -```console + ```console -mvn exec:java -Dexec.mainClass="com.communication.quickstart.App" -Dexec.cleanupDaemonThreads=false + mvn exec:java -Dexec.mainClass="com.communication.quickstart.App" -Dexec.cleanupDaemonThreads=false -``` + ``` diff --git a/articles/communication-services/quickstarts/sms/includes/send-sms-js.md b/articles/communication-services/quickstarts/sms/includes/send-sms-js.md index d8db07ed61cd..7b7328a392ed 100644 --- a/articles/communication-services/quickstarts/sms/includes/send-sms-js.md +++ b/articles/communication-services/quickstarts/sms/includes/send-sms-js.md @@ -7,7 +7,7 @@ manager: ankita ms.service: azure-communication-services ms.subservice: azure-communication-services -ms.date: 06/30/2021 +ms.date: 05/25/2022 ms.topic: include ms.custom: include file ms.author: bertong @@ -18,37 +18,41 @@ Get started with Azure Communication Services by using the Communication Service Completing this quickstart incurs a small cost of a few USD cents or less in your Azure account. > [!NOTE] -> Find the finalized code for this quickstart on [GitHub](https://github.com/Azure-Samples/communication-services-javascript-quickstarts/tree/main/send-sms) +> Find the finalized code for this quickstart on [GitHub](https://github.com/Azure-Samples/communication-services-javascript-quickstarts/tree/main/send-sms). ## Prerequisites - An Azure account with an active subscription. [Create an account for free](https://azure.microsoft.com/free/?WT.mc_id=A261C142F). -- [Node.js](https://nodejs.org/) Active LTS and Maintenance LTS versions (8.11.1 and 10.14.1 recommended). +- [Node.js](https://nodejs.org/) Active LTS and Maintenance LTS versions (8.11.1 and 10.14.1 are recommended). 
- An active Communication Services resource and connection string. [Create a Communication Services resource](../../create-communication-resource.md). -- An SMS enabled telephone number. [Get a phone number](../../telephony/get-phone-number.md). +- An SMS-enabled telephone number. [Get a phone number](../../telephony/get-phone-number.md). ### Prerequisite check - In a terminal or command window, run `node --version` to check that Node.js is installed. -- To view the phone numbers associated with your Communication Services resource, sign in to the [Azure portal](https://portal.azure.com/), locate your Communication Services resource and open the **phone numbers** tab from the left navigation pane. +- To view the phone numbers that are associated with your Communication Services resource, sign in to the [Azure portal](https://portal.azure.com/) and locate your Communication Services resource. In the navigation pane on the left, select **Phone numbers**. -## Setting up +## Set up the application environment -### Create a new Node.js Application +To set up an environment for sending messages, take the steps in the following sections. -First, open your terminal or command window, create a new directory for your app, and navigate to it. +### Create a new Node.js application -```console -mkdir sms-quickstart && cd sms-quickstart -``` +1. Open your terminal or command window, and then run the following command to create a new directory for your app and navigate to it. -Run `npm init -y` to create a **package.json** file with default settings. + ```console + mkdir sms-quickstart && cd sms-quickstart + ``` -```console -npm init -y -``` +1. Run the following command to create a **package.json** file with default settings. + + ```console + npm init -y + ``` + +1. Use a text editor to create a file called **send-sms.js** in the project root directory. -Use a text editor to create a file called **send-sms.js** in the project root directory. You'll add all the source code for this quickstart to this file in the following sections. +In the following sections, you'll add all the source code for this quickstart to the **send-sms.js** file that you just created. ### Install the package @@ -67,30 +71,34 @@ The following classes and interfaces handle some of the major features of the Az | Name | Description | | ------------------------------------- | ------------------------------------------------------------ | | SmsClient | This class is needed for all SMS functionality. You instantiate it with your subscription information, and use it to send SMS messages. | -| SmsSendRequest | This interface is the model for building the sms request (eg. configure the to and from phone numbers and the sms content). | -| SmsSendOptions | This interface provides options to configure delivery reporting. If `enableDeliveryReport` is set to `true`, then an event will be emitted when delivery is successful. | +| SmsSendRequest | This interface is the model for building the SMS request. You use it to configure the to and from phone numbers and the SMS content. | +| SmsSendOptions | This interface provides options for configuring delivery reporting. If `enableDeliveryReport` is set to `true`, an event is emitted when delivery is successful. | | SmsSendResult | This class contains the result from the SMS service. | ## Authenticate the client -Import the **SmsClient** from the SDK and instantiate it with your connection string. 
The code below retrieves the connection string for the resource from an environment variable named `COMMUNICATION_SERVICES_CONNECTION_STRING`. Learn how to [manage your resource's connection string](../../create-communication-resource.md#store-your-connection-string). +To authenticate a client, you import the **SmsClient** from the SDK and instantiate it with your connection string. You can retrieve the connection string for the resource from an environment variable. For instance, the code in this section retrieves the connection string from the `COMMUNICATION_SERVICES_CONNECTION_STRING` environment variable. Learn how to [manage your resource's connection string](../../create-communication-resource.md#store-your-connection-string). -Create and open a file named **send-sms.js** and add the following code: +To import the client and instantiate it: + +1. Create a file named **send-sms.js**. + +1. Add the following code to **send-sms.js**. ```javascript const { SmsClient } = require('@azure/communication-sms'); -// This code demonstrates how to fetch your connection string +// This code retrieves your connection string // from an environment variable. const connectionString = process.env['COMMUNICATION_SERVICES_CONNECTION_STRING']; -// Instantiate the SMS client +// Instantiate the SMS client. const smsClient = new SmsClient(connectionString); ``` ## Send a 1:N SMS message -To send an SMS message to a list of recipients, call the `send` function from the SmsClient with a list of recipients phone numbers (if you wish to send a message to a single recipient, only include one number in the list). Add this code to the end of **send-sms.js**: +To send an SMS message to a list of recipients, call the `send` function from the SmsClient with a list of recipient phone numbers. If you'd like to send a message to a single recipient, include only one number in the list. Add this code to the end of **send-sms.js**: ```javascript async function main() { @@ -100,8 +108,8 @@ async function main() { message: "Hello World 👋🏻 via SMS" }); - // individual messages can encounter errors during sending - // use the "successful" property to verify + // Individual messages can encounter errors during sending. + // Use the "successful" property to verify the status. for (const sendResult of sendResults) { if (sendResult.successful) { console.log("Success: ", sendResult); @@ -113,14 +121,18 @@ async function main() { main(); ``` -You should replace `` with an SMS-enabled phone number associated with your Communication Services resource and `` and `` with the phone number(s) you wish to send a message to. + +Make these replacements in the code: + +- Replace `` with an SMS-enabled phone number that's associated with your Communication Services resource. +- Replace `` and `` with the phone numbers that you'd like to send a message to. > [!WARNING] -> Note that phone numbers should be provided in E.164 international standard format (e.g.: +14255550123). The **From** phone number may be a Short Code as well (e.g.: 23456). +> Provide phone numbers in E.164 international standard format, for example, +14255550123. The value for `` can also be a short code, for example, 23456. ## Send a 1:N SMS message with options -You may also pass in an options object to specify whether the delivery report should be enabled and to set custom tags. +You can also provide an options object to specify whether the delivery report should be enabled and to set custom tags. 
```javascript @@ -130,13 +142,13 @@ async function main() { to: ["", ""], message: "Weekly Promotion!" }, { - //Optional parameters + // Optional parameters enableDeliveryReport: true, tag: "marketing" }); - // individual messages can encounter errors during sending - // use the "successful" property to verify + // Individual messages can encounter errors during sending. + // Use the "successful" property to verify the status. for (const sendResult of sendResults) { if (sendResult.successful) { console.log("Success: ", sendResult); @@ -149,17 +161,20 @@ async function main() { main(); ``` -You should replace `` with an SMS-enabled phone number associated with your Communication Services resource and `` and `` with phone number(s) you wish to send a message to. +Make these replacements in the code: + +- Replace `` with an SMS-enabled phone number that's associated with your Communication Services resource. +- Replace `` and `` with phone numbers that you'd like to send a message to. > [!WARNING] -> Note that phone numbers should be provided in E.164 international standard format (e.g.: +14255550123). The **From** phone number may be a Short Code as well (e.g.: 23456). +> Provide phone numbers in E.164 international standard format, for example, +14255550123. The value for `` can also be a short code, for example, 23456. -The `enableDeliveryReport` parameter is an optional parameter that you can use to configure Delivery Reporting. This is useful for scenarios where you want to emit events when SMS messages are delivered. See the [Handle SMS Events](../handle-sms-events.md) quickstart to configure Delivery Reporting for your SMS messages. -`tag` is an optional parameter that you can use to apply a tag to the Delivery Report. +The `enableDeliveryReport` parameter is an optional parameter that you can use to configure delivery reporting. This functionality is useful when you want to emit events when SMS messages are delivered. See the [Handle SMS Events](../handle-sms-events.md) quickstart to configure delivery reporting for your SMS messages. +The `tag` parameter is optional. You can use it to apply a tag to the delivery report. ## Run the code -Use the `node` command to run the code you added to the **send-sms.js** file. +Use the `node` command to run the code that you added to the **send-sms.js** file. ```console diff --git a/articles/communication-services/quickstarts/sms/includes/send-sms-net.md b/articles/communication-services/quickstarts/sms/includes/send-sms-net.md index 8bfd2498372d..6a5c43a9ab7b 100644 --- a/articles/communication-services/quickstarts/sms/includes/send-sms-net.md +++ b/articles/communication-services/quickstarts/sms/includes/send-sms-net.md @@ -7,7 +7,7 @@ manager: rejooyan ms.service: azure-communication-services ms.subservice: azure-communication-services -ms.date: 06/30/2021 +ms.date: 05/25/2022 ms.topic: include ms.custom: include file ms.author: peiliu @@ -18,57 +18,59 @@ Get started with Azure Communication Services by using the Communication Service Completing this quickstart incurs a small cost of a few USD cents or less in your Azure account. > [!NOTE] -> Find the finalized code for this quickstart on [GitHub](https://github.com/Azure-Samples/communication-services-dotnet-quickstarts/tree/main/SendSMS) +> Find the finalized code for this quickstart on [GitHub](https://github.com/Azure-Samples/communication-services-dotnet-quickstarts/tree/main/SendSMS). ## Prerequisites - An Azure account with an active subscription. 
[Create an account for free](https://azure.microsoft.com/free/?WT.mc_id=A261C142F). -- The latest version [.NET Core SDK](https://dotnet.microsoft.com/download/dotnet-core) for your operating system. +- The latest version of [.NET Core SDK](https://dotnet.microsoft.com/download/dotnet-core) for your operating system. - An active Communication Services resource and connection string. [Create a Communication Services resource](../../create-communication-resource.md). -- An SMS enabled telephone number. [Get a phone number](../../telephony/get-phone-number.md). +- An SMS-enabled telephone number. [Get a phone number](../../telephony/get-phone-number.md). ### Prerequisite check - In a terminal or command window, run the `dotnet` command to check that the .NET SDK is installed. -- To view the phone numbers associated with your Communication Services resource, sign in to the [Azure portal](https://portal.azure.com/), locate your Communication Services resource and open the **phone numbers** tab from the left navigation pane. +- To view the phone numbers that are associated with your Communication Services resource, sign in to the [Azure portal](https://portal.azure.com/) and locate your Communication Services resource. In the navigation pane on the left, select **Phone numbers**. -## Setting up +## Set up the application environment + +To set up an environment for sending messages, take the steps in the following sections. ### Create a new C# application -In a console window (such as cmd, PowerShell, or Bash), use the `dotnet new` command to create a new console app with the name `SmsQuickstart`. This command creates a simple "Hello World" C# project with a single source file: **Program.cs**. +1. In a console window, such as cmd, PowerShell, or Bash, use the `dotnet new` command to create a new console app with the name `SmsQuickstart`. This command creates a simple "Hello World" C# project with a single source file, **Program.cs**. -```console -dotnet new console -o SmsQuickstart -``` + ```console + dotnet new console -o SmsQuickstart + ``` -Change your directory to the newly created app folder and use the `dotnet build` command to compile your application. +1. Change your directory to the newly created app folder and use the `dotnet build` command to compile your application. -```console -cd SmsQuickstart -dotnet build -``` + ```console + cd SmsQuickstart + dotnet build + ``` ### Install the package -While still in the application directory, install the Azure Communication Services SMS SDK for .NET package by using the `dotnet add package` command. +1. While still in the application directory, install the Azure Communication Services SMS SDK for .NET package by using the following command. -```console -dotnet add package Azure.Communication.Sms --version 1.0.0 -``` + ```console + dotnet add package Azure.Communication.Sms --version 1.0.0 + ``` -Add a `using` directive to the top of **Program.cs** to include the `Azure.Communication` namespace. +1. Add a `using` directive to the top of **Program.cs** to include the `Azure.Communication` namespace. 
-```csharp + ```csharp -using System; -using System.Collections.Generic; + using System; + using System.Collections.Generic; -using Azure; -using Azure.Communication; -using Azure.Communication.Sms; + using Azure; + using Azure.Communication; + using Azure.Communication.Sms; -``` + ``` ## Object model @@ -77,16 +79,16 @@ The following classes and interfaces handle some of the major features of the Az | Name | Description | | ------------------------------------------ | ----------------------------------------------------------------------------------------------------------------------------------------------------------------- | | SmsClient | This class is needed for all SMS functionality. You instantiate it with your subscription information, and use it to send SMS messages. | -| SmsSendOptions | This class provides options to configure delivery reporting. If enable_delivery_report is set to True, then an event will be emitted when delivery was successful | +| SmsSendOptions | This class provides options for configuring delivery reporting. If enable_delivery_report is set to True, an event is emitted when delivery is successful. | | SmsSendResult | This class contains the result from the SMS service. | ## Authenticate the client - Open **Program.cs** in a text editor and replace the body of the `Main` method with code to initialize an `SmsClient` with your connection string. The code below retrieves the connection string for the resource from an environment variable named `COMMUNICATION_SERVICES_CONNECTION_STRING`. Learn how to [manage your resource's connection string](../../create-communication-resource.md#store-your-connection-string). +Open **Program.cs** in a text editor and replace the body of the `Main` method with code to initialize an `SmsClient` with your connection string. The following code retrieves the connection string for the resource from an environment variable named `COMMUNICATION_SERVICES_CONNECTION_STRING`. Learn how to [manage your resource's connection string](../../create-communication-resource.md#store-your-connection-string). ```csharp -// This code demonstrates how to fetch your connection string +// This code retrieves your connection string // from an environment variable. string connectionString = Environment.GetEnvironmentVariable("COMMUNICATION_SERVICES_CONNECTION_STRING"); @@ -95,7 +97,7 @@ SmsClient smsClient = new SmsClient(connectionString); ## Send a 1:1 SMS message -To send an SMS message to a single recipient, call the `Send` or `SendAsync` function from the SmsClient. Add this code to the end of `Main` method in **Program.cs**: +To send an SMS message to a single recipient, call the `Send` or `SendAsync` function from the SmsClient. Add this code to the end of the `Main` method in **Program.cs**: ```csharp SmsSendResult sendResult = smsClient.Send( @@ -106,13 +108,18 @@ SmsSendResult sendResult = smsClient.Send( Console.WriteLine($"Sms id: {sendResult.MessageId}"); ``` -You should replace `` with an SMS-enabled phone number associated with your Communication Services resource and `` with the phone number you wish to send a message to. + +Make these replacements in the code: + +- Replace `` with an SMS-enabled phone number that's associated with your Communication Services resource. +- Replace `` with the phone number that you'd like to send a message to. > [!WARNING] -> Note that phone numbers should be provided in E.164 international standard format (e.g.: +14255550123). The **From** phone number may be a Short Code as well (e.g.: 23456). 
+> Provide phone numbers in E.164 international standard format, for example, +14255550123. The value for `` can also be a short code, for example, 23456. ## Send a 1:N SMS message with options -To send an SMS message to a list of recipients, call the `Send` or `SendAsync` function from the SmsClient with a list of recipient's phone numbers. You may also pass in optional parameters to specify whether the delivery report should be enabled and to set custom tags. + +To send an SMS message to a list of recipients, call the `Send` or `SendAsync` function from the SmsClient with a list of recipient phone numbers. You can also provide optional parameters to specify whether the delivery report should be enabled and to set custom tags. ```csharp Response> response = smsClient.Send( @@ -132,14 +139,17 @@ foreach (SmsSendResult result in results) } ``` -You should replace `` with an SMS-enabled phone number associated with your Communication Services resource and `` and `` with phone number(s) you wish to send a message to. +Make these replacements in the code: + +- Replace `` with an SMS-enabled phone number that's associated with your Communication Services resource. +- Replace `` and `` with phone numbers that you'd like to send a message to. > [!WARNING] -> Note that phone numbers should be provided in E.164 international standard format (e.g.: +14255550123). The **From** phone number may be a Short Code as well (e.g.: 23456). +> Provide phone numbers in E.164 international standard format, for example, +14255550123. The value for `` can also be a short code, for example, 23456. -The `enableDeliveryReport` parameter is an optional parameter that you can use to configure Delivery Reporting. This is useful for scenarios where you want to emit events when SMS messages are delivered. See the [Handle SMS Events](../handle-sms-events.md) quickstart to configure Delivery Reporting for your SMS messages. +The `enableDeliveryReport` parameter is an optional parameter that you can use to configure delivery reporting. This functionality is useful when you want to emit events when SMS messages are delivered. See the [Handle SMS Events](../handle-sms-events.md) quickstart to configure delivery reporting for your SMS messages. -`Tag` is used to apply a tag to the Delivery Report +You can use the `Tag` parameter to apply a tag to the delivery report. ## Run the code @@ -149,6 +159,6 @@ Run the application from your application directory with the `dotnet run` comman dotnet run ``` -## Sample Code +## Sample code -You can download the sample app from [GitHub](https://github.com/Azure-Samples/communication-services-dotnet-quickstarts/tree/main/SendSMS) +You can download the sample app from [GitHub](https://github.com/Azure-Samples/communication-services-dotnet-quickstarts/tree/main/SendSMS). 
diff --git a/articles/communication-services/quickstarts/sms/includes/send-sms-python.md b/articles/communication-services/quickstarts/sms/includes/send-sms-python.md index 197ac29f05b9..e8e63ec2c03d 100644 --- a/articles/communication-services/quickstarts/sms/includes/send-sms-python.md +++ b/articles/communication-services/quickstarts/sms/includes/send-sms-python.md @@ -7,7 +7,7 @@ manager: ankita ms.service: azure-communication-services ms.subservice: azure-communication-services -ms.date: 06/30/2021 +ms.date: 05/25/2022 ms.topic: include ms.custom: include file ms.author: lakshmans @@ -18,46 +18,50 @@ Get started with Azure Communication Services by using the Communication Service Completing this quickstart incurs a small cost of a few USD cents or less in your Azure account. > [!NOTE] -> Find the finalized code for this quickstart on [GitHub](https://github.com/Azure-Samples/communication-services-python-quickstarts/tree/main/send-sms-quickstart) +> Find the finalized code for this quickstart on [GitHub](https://github.com/Azure-Samples/communication-services-python-quickstarts/tree/main/send-sms-quickstart). ## Prerequisites - An Azure account with an active subscription. [Create an account for free](https://azure.microsoft.com/free/?WT.mc_id=A261C142F). - [Python](https://www.python.org/downloads/) 2.7 or 3.6+. - An active Communication Services resource and connection string. [Create a Communication Services resource](../../create-communication-resource.md). -- An SMS enabled telephone number. [Get a phone number](../../telephony/get-phone-number.md). +- An SMS-enabled telephone number. [Get a phone number](../../telephony/get-phone-number.md). ### Prerequisite check - In a terminal or command window, run the `python --version` command to check that Python is installed. -- To view the phone numbers associated with your Communication Services resource, sign in to the [Azure portal](https://portal.azure.com/), locate your Communication Services resource and open the **phone numbers** tab from the left navigation pane. +- To view the phone numbers that are associated with your Communication Services resource, sign in to the [Azure portal](https://portal.azure.com/) and locate your Communication Services resource. In the navigation pane on the left, select **Phone numbers**. -## Setting up +## Set up the application environment + +To set up an environment for sending messages, take the steps in the following sections. ### Create a new Python application -Open your terminal or command window, create a new directory for your app, and navigate to it. +1. Open your terminal or command window. Then use the following command to create a new directory for your app and navigate to it. -```console -mkdir sms-quickstart && cd sms-quickstart -``` + ```console + mkdir sms-quickstart && cd sms-quickstart + ``` -Use a text editor to create a file called **send-sms.py** in the project root directory and add the structure for the program, including basic exception handling. You'll add all the source code for this quickstart to this file in the following sections. +1. Use a text editor to create a file called **send-sms.py** in the project root directory and add the structure for the program, including basic exception handling. -```python -import os -from azure.communication.sms import SmsClient + ```python + import os + from azure.communication.sms import SmsClient -try: - # Quickstart code goes here -except Exception as ex: - print('Exception:') - print(ex) -``` + try: + # Quickstart code goes here. 
+ except Exception as ex: + print('Exception:') + print(ex) + ``` + +In the following sections, you'll add all the source code for this quickstart to the **send-sms.py** file that you just created. ### Install the package -While still in the application directory, install the Azure Communication Services SMS SDK for Python package by using the `pip install` command. +While still in the application directory, install the Azure Communication Services SMS SDK for Python package by using the following command. ```console pip install azure-communication-sms @@ -77,18 +81,18 @@ The following classes and interfaces handle some of the major features of the Az Instantiate an **SmsClient** with your connection string. Learn how to [manage your resource's connection string](../../create-communication-resource.md#store-your-connection-string). ```python -# Create the SmsClient object which will be used to send SMS messages +# Create the SmsClient object that you use to send SMS messages. sms_client = SmsClient.from_connection_string() ``` -For simplicity we are using connection strings in this quickstart, but in production environments we recommend using [service principals](../../../quickstarts/identity/service-principal.md). +For simplicity, this quickstart uses connection strings, but in production environments, we recommend using [service principals](../../../quickstarts/identity/service-principal.md). -## Send a 1:1 SMS Message +## Send a 1:1 SMS message -To send an SMS message to a single recipient, call the ```send``` method from the **SmsClient** with a single recipient phone number. You may also pass in optional parameters to specify whether the delivery report should be enabled and to set custom tags. Add this code to the end of `try` block in **send-sms.py**: +To send an SMS message to a single recipient, call the `send` method from the **SmsClient** with a single recipient phone number. You can also provide optional parameters to specify whether the delivery report should be enabled and to set custom tags. Add this code to the end of the `try` block in **send-sms.py**: ```python -# calling send() with sms values +# Call send() with SMS values. sms_responses = sms_client.send( from_="", to="", @@ -98,18 +102,21 @@ sms_responses = sms_client.send( ``` -You should replace `` with an SMS enabled phone number associated with your communication service and `` with the phone number you wish to send a message to. +Make these replacements in the code: + +- Replace `` with an SMS-enabled phone number that's associated with your communication service. +- Replace `` with the phone number that you'd like to send a message to. > [!WARNING] -> Note that phone numbers should be provided in E.164 international standard format (e.g.: +14255550123). The **From** phone number may be a Short Code as well (e.g.: 23456). +> Provide phone numbers in E.164 international standard format, for example, +14255550123. The value for `` can also be a short code, for example, 23456. -## Send a 1:N SMS Message +## Send a 1:N SMS message -To send an SMS message to a list of recipients, call the ```send``` method from the **SmsClient** with a list of recipient's phone numbers. You may also pass in optional parameters to specify whether the delivery report should be enabled and to set custom tags. Add this code to the end of `try` block in **send-sms.py**: +To send an SMS message to a list of recipients, call the `send` method from the **SmsClient** with a list of recipient phone numbers. 
You can also provide optional parameters to specify whether the delivery report should be enabled and to set custom tags. Add this code to the end of the `try` block in **send-sms.py**: ```python -# calling send() with sms values +# Call send() with SMS values. sms_responses = sms_client.send( from_="", to=["", ""], @@ -119,25 +126,29 @@ sms_responses = sms_client.send( ``` -You should replace `` with an SMS enabled phone number associated with your communication service and `` `` with phone number(s) you wish to send a message to. +Make these replacements in the code: + +- Replace `` with an SMS-enabled phone number that's associated with your communication service. +- Replace `` and `` with phone numbers that you'd like to send a message to. > [!WARNING] -> Note that phone numbers should be provided in E.164 international standard format (e.g.: +14255550123). The **From** phone number may be a Short Code as well (e.g.: 23456). +> Provide phone numbers in E.164 international standard format, for example, +14255550123. The value for `` can also be a short code, for example, 23456. -## Optional Parameters +## Optional parameters -The `enable_delivery_report` parameter is an optional parameter that you can use to configure Delivery Reporting. This is useful for scenarios where you want to emit events when SMS messages are delivered. See the [Handle SMS Events](../handle-sms-events.md) quickstart to configure Delivery Reporting for your SMS messages. +The `enable_delivery_report` parameter is an optional parameter that you can use to configure delivery reporting. This functionality is useful when you want to emit events when SMS messages are delivered. See the [Handle SMS Events](../handle-sms-events.md) quickstart to configure delivery reporting for your SMS messages. -The `tag` parameter is an optional parameter that you can use to apply a tag to the Delivery Report. +The `tag` parameter is an optional parameter that you can use to apply a tag to the delivery report. ## Run the code + Run the application from your application directory with the `python` command. ```console python send-sms.py ``` -The complete Python script should look something like: +The complete Python script should look something like the following code: ```python @@ -145,9 +156,9 @@ import os from azure.communication.sms import SmsClient try: - # Create the SmsClient object which will be used to send SMS messages + # Create the SmsClient object that you use to send SMS messages. sms_client = SmsClient.from_connection_string("") - # calling send() with sms values + # Call send() with SMS values. sms_responses = sms_client.send( from_="", to="", diff --git a/articles/communication-services/quickstarts/sms/send.md b/articles/communication-services/quickstarts/sms/send.md index 41ed5380d2c1..37341e9840de 100644 --- a/articles/communication-services/quickstarts/sms/send.md +++ b/articles/communication-services/quickstarts/sms/send.md @@ -1,16 +1,20 @@ --- title: Quickstart - Send an SMS message -titleSuffix: An Azure Communication Services quickstart -description: Learn how to send an SMS message using Azure Communication Services. +titleSuffix: Azure Communication Services +description: "In this quickstart, you'll learn how to send an SMS message by using Azure Communication Services. See code examples in C#, JavaScript, Java, and Python." 
author: probableprime manager: chpalm services: azure-communication-services ms.author: rifox -ms.date: 06/30/2021 +ms.date: 05/25/2022 ms.topic: quickstart ms.service: azure-communication-services ms.subservice: sms -ms.custom: tracking-python, devx-track-js, mode-other +ms.custom: + - tracking-python + - devx-track-js + - mode-other + - kr2b-contr-experiment zone_pivot_groups: acs-js-csharp-java-python --- # Quickstart: Send an SMS message @@ -18,8 +22,8 @@ zone_pivot_groups: acs-js-csharp-java-python [!INCLUDE [Regional Availability Notice](../../includes/regional-availability-include.md)] > [!IMPORTANT] -> SMS messages can be sent to and received from United States phone numbers. Phone numbers located in other geographies are not yet supported by Communication Services SMS. -> For more information, see **[Phone number types](../../concepts/telephony/plan-solution.md)**. +> SMS messages can be sent to and received from United States phone numbers. Phone numbers that are located in other geographies are not yet supported by Azure Communication Services SMS. +> For more information, see [Phone number types](../../concepts/telephony/plan-solution.md).

    >[!VIDEO https://www.youtube.com/embed/YEyxSZqzF4o] @@ -50,7 +54,7 @@ If you want to clean up and remove a Communication Services subscription, you ca ## Next steps -In this quickstart, you learned how to send SMS messages using Azure Communication Services. +In this quickstart, you learned how to send SMS messages by using Communication Services. > [!div class="nextstepaction"] > [Receive SMS and Delivery Report Events](./handle-sms-events.md) diff --git a/articles/communication-services/quickstarts/voice-video-calling/includes/custom-teams-endpoint/voice-video-calling-cte-javascript.md b/articles/communication-services/quickstarts/voice-video-calling/includes/custom-teams-endpoint/voice-video-calling-cte-javascript.md index 7fdecf044e5f..c1a13c86953a 100644 --- a/articles/communication-services/quickstarts/voice-video-calling/includes/custom-teams-endpoint/voice-video-calling-cte-javascript.md +++ b/articles/communication-services/quickstarts/voice-video-calling/includes/custom-teams-endpoint/voice-video-calling-cte-javascript.md @@ -28,10 +28,10 @@ mkdir calling-quickstart && cd calling-quickstart ### Install the package Use the `npm install` command to install the Azure Communication Services Calling SDK for JavaScript. > [!IMPORTANT] -> This quickstart uses the Azure Communication Services Calling SDK version `1.3.2-beta.1`. +> This quickstart uses the Azure Communication Services Calling SDK version `1.5.4-beta.1`. ```console npm install @azure/communication-common --save -npm install @azure/communication-calling@1.3.2-beta.1 --save +npm install @azure/communication-calling@1.5.4-beta.1 --save ``` ### Set up the app framework This quickstart uses webpack to bundle the application assets. Run the following command to install the `webpack`, `webpack-cli` and `webpack-dev-server` npm packages and list them as development dependencies in your `package.json`: @@ -408,4 +408,4 @@ On the first tab, enter the Azure Communication Services user identity of the se From the second tab, select the "Accept Call" button. The call will be answered and connected. Tabs should show the similar result like the following image: :::image type="content" source="../../media/javascript/1-on-1-video-calling-d.png" alt-text="Screenshot is showing two tabs, with ongoing call between two Teams users, each logged in the individual tab." lightbox="../../media/javascript/1-on-1-video-calling-d.png"::: -Both tabs are now successfully in a 1:1 video call. Both users can hear each other's audio and see each other video stream. \ No newline at end of file +Both tabs are now successfully in a 1:1 video call. Both users can hear each other's audio and see each other video stream. 
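The calling quickstart above installs `@azure/communication-common` and `@azure/communication-calling`, but how the two packages fit together is easy to miss. The following is a minimal sketch of the usual pattern for initializing a call client and starting a 1:1 call; the access token and Teams user ID are placeholders, token acquisition is out of scope here, and depending on the SDK version Teams identities may require a different call agent factory than the one shown. Treat it as an illustration rather than the quickstart's exact code.

```javascript
const { CallClient } = require('@azure/communication-calling');
const { AzureCommunicationTokenCredential } = require('@azure/communication-common');

async function startOneToOneCall() {
  // Placeholder token: acquire a real access token for your identity first.
  const tokenCredential = new AzureCommunicationTokenCredential('<access-token>');

  const callClient = new CallClient();
  const callAgent = await callClient.createCallAgent(tokenCredential);

  // Request microphone and camera permissions up front.
  const deviceManager = await callClient.getDeviceManager();
  await deviceManager.askDevicePermission({ audio: true, video: true });

  // Start a 1:1 call to a Teams user, addressed by Azure AD object ID (placeholder).
  const call = callAgent.startCall([{ microsoftTeamsUserId: '<teams-user-object-id>' }]);
  call.on('stateChanged', () => console.log(`Call state: ${call.state}`));
}

startOneToOneCall().catch(console.error);
```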
diff --git a/articles/communication-services/toc.yml b/articles/communication-services/toc.yml index 1fc4edf08a3f..cdc1145080c5 100644 --- a/articles/communication-services/toc.yml +++ b/articles/communication-services/toc.yml @@ -96,6 +96,8 @@ items: items: - name: Virtual visit scenarios href: tutorials/virtual-visits.md + - name: Virtual event scenarios + href: tutorials/events-playbook.md - name: Use Postman to send SMS messages href: tutorials/postman-tutorial.md - name: Sign an HTTP request with HMAC using C# @@ -105,7 +107,7 @@ items: - name: Prepare a Node.js web app for Calling href: tutorials/building-app-start.md - name: Export SDK telemetry to Application Insights - href: quickstarts/telemetry-application-insights.md + href: quickstarts/telemetry-application-insights.md - name: Add file sharing to your application with UI Library href: tutorials/file-sharing-tutorial.md - name: Concepts @@ -254,6 +256,8 @@ items: href: concepts/privacy.md - name: Pricing href: concepts/pricing.md + - name: Email Pricing + href: concepts/email-pricing.md - name: SMS Pricing href: concepts/sms-pricing.md - name: PSTN Pricing diff --git a/articles/communication-services/tutorials/events-playbook.md b/articles/communication-services/tutorials/events-playbook.md new file mode 100644 index 000000000000..25c0bf7909ec --- /dev/null +++ b/articles/communication-services/tutorials/events-playbook.md @@ -0,0 +1,122 @@ +--- +title: Build a custom event management platform with Microsoft Teams, Graph and Azure Communication Services +titleSuffix: An Azure Communication Services tutorial +description: Learn how to use Microsoft Teams, Graph and Azure Communication Services to build a custom event management platform. +author: ddematheu2 +manager: chpalm +services: azure-communication-services + +ms.author: dademath +ms.date: 03/31/2022 +ms.topic: tutorial +ms.service: azure-communication-services +ms.subservice: teams-interop +--- + +# Build a custom event management platform with Microsoft Teams, Graph and Azure Communication Services + +The goal of this document is to reduce the time it takes for Event Management Platforms to apply the power of Microsoft Teams Webinars through integration with Graph APIs and ACS UI Library. The target audience is developers and decision makers. To achieve the goal, this document provides the following two functions: 1) an aid to help event management platforms quickly decide what level of integration would be right for them, and 2) a step-by-step end-to-end QuickStart to speed up implementation. + +## What are virtual events and event management platforms? + +Microsoft empowers event platforms to integrate event capabilities using [Microsoft Teams](/microsoftteams/quick-start-meetings-live-events), [Graph](/graph/api/application-post-onlinemeetings?tabs=http&view=graph-rest-beta) and [Azure Communication Services](../overview.md). Virtual Events are a communication modality where event organizers schedule and configure a virtual environment for event presenters and participants to engage with content through voice, video, and chat. Event management platforms enable users to configure events and for attendees to participate in those events, within their platform, applying in-platform capabilities and gamification. Learn more about[ Teams Meetings, Webinars and Live Events](/microsoftteams/quick-start-meetings-live-events) that are used throughout this article to enable virtual event scenarios. + +## What are the building blocks of an event management platform? 
+ +Event platforms require three core building blocks to deliver a virtual event experience. + +### 1. Event Scheduling and Management + +To get started, event organizers must schedule and configure the event. This process creates the virtual container that event attendees and presenters will enter to interact. As part of configuration, organizers might choose to add registration requirements for the event. Microsoft provides two patterns for organizers to create events: + +- Teams Client (Web or Desktop): Organizers can directly create events using their Teams client where they can choose a time and place, configure registration, and send to a list of attendees. + +- Microsoft Graph: Programmatically, event platforms can schedule and configure a Teams event on behalf of a user by using their Microsoft 365 license. + +### 2. Attendee experience + +Event attendees are presented with an experience that enables them to attend, participate, and engage with an event’s content. This experience might include capabilities like watching content, sharing their camera stream, asking questions, responding to polls, and more. Microsoft provides two options for attendees to consume events powered by Teams and Azure Communication Services: + +- Teams Client (Web or Desktop): Attendees can directly join events using a Teams Client by using a provided join link. They get access to the full Teams experience. + +- Azure Communication Services: Attendees can join events through a custom client powered by [Azure Communication Services](../overview.md) using [Teams Interoperability](../concepts/join-teams-meeting.md). This client can be directly embedded into an Event Platform so that attendees never need to leave the experience. This experience can be built from the ground up using Azure Communication Services SDKs for [calling](../quickstarts/voice-video-calling/get-started-teams-interop.md?pivots=platform-web) and [chat](../quickstarts/chat/meeting-interop.md?pivots=platform-web) or by applying our low-code [UI Library](../quickstarts/ui-library/get-started-composites.md?pivots=platform-web&tabs=kotlin). + +### 3. Host & Organizer experience + +Event hosts and organizers require the ability to present content, manage attendees (mute, change roles, etc.), and manage the event (start, end, etc.). + +- Teams Client (Web or Desktop): Presenters can join using the fully fledged Teams client for web or mobile. The Teams client provides presenters a full set of capabilities to deliver their content. Learn more about [presenter capabilities for Teams](https://support.microsoft.com/office/present-in-a-live-event-in-teams-d58fc9db-ff5b-4633-afb3-b4b2ddef6c0a). + +## Building a custom solution for event management with Azure Communication Services and Microsoft Graph + +Throughout the rest of this tutorial, we will focus on how to use Azure Communication Services and Microsoft Graph to build a custom event management platform. We will be using the sample architecture below. Based on that architecture, we will focus on setting up scheduling and registration flows and embedding the attendee experience right on the event platform to join the event.
+ +:::image type="content" source="./media/event-management-platform-architecture.svg" alt-text="Diagram showing sample architecture for event management platform"::: + +## Leveraging Microsoft Graph to schedule events and register attendees + +Microsoft Graph enables event management platforms to empower organizers to schedule and manage their events directly through the event management platform. For attendees, event management platforms can build custom registration flows right on their platform that register the attendee for the event and generate unique credentials for them to join the Teams hosted event. + +>[!NOTE] +>Each Graph API has different required scopes. Ensure that your application has the correct scopes to access the data. + +### Scheduling registration-enabled events with Microsoft Graph + +1. Authorize the application to use Graph APIs on behalf of a service account. This authorization is required so that the application can use credentials to interact with your tenant to schedule events and register attendees. + + 1. Create an account that will own the meetings and is branded appropriately. This is the account that will create the events and that will receive notifications for them. We recommend that you don't use a personal production account, given the overhead it might incur in the form of reminders. + + 1. As part of the application setup, the service account is used to sign in to the solution once. With this permission, the application can retrieve and store an access token on behalf of the service account that will own the meetings. Your application will need to store the tokens generated from the sign-in and place them in a secure location such as a key vault. The application will need to store both the access token and the refresh token. Learn more about [auth tokens](../../active-directory/develop/access-tokens.md) and [refresh tokens](../../active-directory/develop/refresh-tokens.md). + + 1. The application will require "on behalf of" permissions with the [offline scope](../../active-directory/develop/v2-permissions-and-consent.md#offline_access) to act on behalf of the service account for the purpose of creating meetings. Individual Graph APIs require different scopes; learn more in the links detailed below as we introduce the required APIs. + + 1. Refresh tokens can be revoked in the event of a breach or account termination. + + >[!NOTE] + >Authorization is required by both developers for testing and organizers who will be using your event platform to set up their events. + +2. The organizer signs in to the Contoso platform to create an event and generate a registration URL. To enable these capabilities, developers should use the following APIs, as shown in the sketch after this list: + + 1. The [Create Calendar Event API](/graph/api/user-post-events?tabs=http&view=graph-rest-1.0) to POST the new event to be created. The Event object returned will contain the join URL required for the next step. You need to set the following parameters: `isonlinemeeting: true` and `onlineMeetingProvider: "teamsForBusiness"`. Set a time zone for the event by using the `Prefer` header. + + 1. Next, use the [Create Online Meeting API](/graph/api/application-post-onlinemeetings?tabs=http&view=graph-rest-beta) to `GET` the online meeting information using the join URL generated from the step above. The `OnlineMeeting` object will contain the `meetingId` required for the registration steps. + + 1. By using these APIs, developers are creating a calendar event that shows up in the Organizer’s calendar and the Teams online meeting where attendees will join.
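To make the first of the two calls in step 2 concrete, here's a minimal sketch that creates a Teams-enabled calendar event through the Graph REST API on behalf of the signed-in service account. The subject, dates, and time zone are illustrative, `graphToken` stands in for the access token your application acquired for the service account, and the sketch assumes a runtime where `fetch` is available (Node.js 18+ or a browser).

```javascript
// Create a calendar event that is also a Teams online meeting, on behalf of
// the service account whose token is passed in.
async function createTeamsEnabledEvent(graphToken) {
  const response = await fetch('https://graph.microsoft.com/v1.0/me/events', {
    method: 'POST',
    headers: {
      'Authorization': `Bearer ${graphToken}`,
      'Content-Type': 'application/json',
      // Ask Graph to return date/time values in this time zone.
      'Prefer': 'outlook.timezone="Pacific Standard Time"'
    },
    body: JSON.stringify({
      subject: 'Contoso virtual event', // illustrative values
      start: { dateTime: '2022-06-15T17:00:00', timeZone: 'Pacific Standard Time' },
      end: { dateTime: '2022-06-15T18:00:00', timeZone: 'Pacific Standard Time' },
      isOnlineMeeting: true,
      onlineMeetingProvider: 'teamsForBusiness'
    })
  });

  const event = await response.json();
  // The join URL on the returned event is what you use to look up the
  // OnlineMeeting object (and its meetingId) in the next sub-step.
  return event.onlineMeeting.joinUrl;
}
```

From there, the online meeting lookup in the second sub-step returns the `meetingId` needed for registration.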
+ +>[!NOTE] +>There's a known issue with double calendar entries for organizers when using the Calendar and Online Meeting APIs. + +3. To enable registration for an event, Contoso can use the [External Meeting Registration API](/graph/api/resources/externalmeetingregistration?view=graph-rest-beta) to POST. The API requires Contoso to pass in the `meetingId` of the `OnlineMeeting` created above. Registration is optional. You can set options on who can register. + +### Register attendees with Microsoft Graph + +Event management platforms can use a custom registration flow to register attendees. This flow is powered by the [External Meeting Registrant API](/graph/api/externalmeetingregistrant-post?tabs=http&view=graph-rest-beta). By using the API, Contoso will receive a unique `Teams Join URL` for each attendee. This URL will be used as part of the attendee experience, either through Teams or Azure Communication Services, to have the attendee join the meeting. + +### Communicate with your attendees using Azure Communication Services + +Through Azure Communication Services, developers can use SMS and Email capabilities to send reminders to attendees for the events they have registered for. Communication can also include confirmation for the event as well as information for joining and participating. +- [SMS capabilities](../quickstarts/sms/send.md) enable you to send text messages to your attendees. +- [Email capabilities](../quickstarts/email/send-email.md) support direct communication to your attendees using custom domains. + +### Leverage Azure Communication Services to build a custom attendee experience + +>[!NOTE] +> There are limitations when using Azure Communication Services as part of a Teams Webinar experience. For more details, visit our [documentation](../concepts/join-teams-meeting.md#limitations-and-known-issues). + +The attendee experience can be directly embedded into an application or platform using [Azure Communication Services](../overview.md) so that your attendees never need to leave your platform. It provides low-level calling and chat SDKs that support [interoperability with Teams Events](../concepts/teams-interop.md), as well as a turn-key UI Library that can be used to reduce development time and easily embed communications. Azure Communication Services gives developers flexibility in the type of solution they need. Review the [limitations](../concepts/join-teams-meeting.md#limitations-and-known-issues) of using Azure Communication Services for webinar scenarios. + +1. To start, developers can leverage Microsoft Graph APIs to retrieve the join URL. This URL is provided uniquely per attendee during [registration](/graph/api/externalmeetingregistrant-post?tabs=http&view=graph-rest-beta). Alternatively, it can be [requested for a given meeting](/graph/api/onlinemeeting-get?tabs=http&view=graph-rest-beta). + +2. Before developers dive into using [Azure Communication Services](../overview.md), they must [create a resource](../quickstarts/create-communication-resource.md?pivots=platform-azp&tabs=windows). + +3. Once a resource is created, developers must [generate access tokens](../quickstarts/access-tokens.md?pivots=programming-language-javascript) for attendees to access Azure Communication Services. We recommend using a [trusted service architecture](../concepts/client-and-server-architecture.md). + +4.
Developers can leverage [headless SDKs](../concepts/teams-interop.md) or [UI Library](https://azure.github.io/communication-ui-library/) using the join link URL to join the Teams meeting through [Teams Interoperability](../concepts/teams-interop.md). Details below: + +|Headless SDKs | UI Library | +|----------------------------------------|---------------------------------------| +| Developers can leverage the [calling](../quickstarts/voice-video-calling/get-started-teams-interop.md?pivots=platform-javascript) and [chat](../quickstarts/chat/meeting-interop.md?pivots=platform-javascript) SDKs to join a Teams meeting with your custom client | Developers can choose between the [call + chat](https://azure.github.io/communication-ui-library/?path=/docs/composites-meeting-basicexample--basic-example) or pure [call](https://azure.github.io/communication-ui-library/?path=/docs/composites-call-basicexample--basic-example) and [chat](https://azure.github.io/communication-ui-library/?path=/docs/composites-chat-basicexample--basic-example) composites to build their experience. Alternatively, developers can leverage [composable components](https://azure.github.io/communication-ui-library/?path=/docs/quickstarts-uicomponents--page) to build a custom Teams interop experience.| + + +>[!NOTE] +>Azure Communication Services is a consumption-based service billed through Azure. For more information on pricing visit our resources. \ No newline at end of file diff --git a/articles/communication-services/tutorials/file-sharing-tutorial.md b/articles/communication-services/tutorials/file-sharing-tutorial.md index d207747681af..2cd15cefd41b 100644 --- a/articles/communication-services/tutorials/file-sharing-tutorial.md +++ b/articles/communication-services/tutorials/file-sharing-tutorial.md @@ -15,6 +15,8 @@ ms.subservice: chat # Enable file sharing using UI Library and Azure Blob Storage +[!INCLUDE [Public Preview Notice](../includes/public-preview-include.md)] + In this tutorial, we'll be configuring the Azure Communication Services UI Library Chat Composite to enable file sharing. The UI Library Chat Composite provides a set of rich components and UI controls that can be used to enable file sharing. We will be leveraging Azure Blob Storage to enable the storage of the files that are shared through the chat thread. >[!IMPORTANT] @@ -52,7 +54,7 @@ The diagram below shows a typical flow of a file sharing scenario for both uploa ## Setup File Storage using Azure Blob -You can follow the tutorial [Upload file to Azure Blob Storage with an Azure Function](https://docs.microsoft.com/azure/developer/javascript/how-to/with-web-app/azure-function-file-upload) to write the backend code required for file sharing. +You can follow the tutorial [Upload file to Azure Blob Storage with an Azure Function](/azure/developer/javascript/how-to/with-web-app/azure-function-file-upload) to write the backend code required for file sharing. Once implemented, you can call this Azure Function inside the `uploadHandler` function to upload files to Azure Blob Storage. For the remaining of the tutorial, we will assume you have generated the function using the tutorial for Azure Blob Storage linked above. 
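Because the file-sharing tutorial above leaves the backend call up to you, here's a minimal sketch of a helper that an upload handler could call to send a file to that Azure Function. The function URL, route, and JSON response shape are assumptions about your own backend, not part of the UI Library API.

```javascript
// Hypothetical helper: POST a file to the Azure Function created with the
// linked tutorial. Replace AZURE_FUNCTION_URL with your function's endpoint.
const AZURE_FUNCTION_URL = 'https://<your-function-app>.azurewebsites.net/api/upload';

async function uploadFileToAzureFunction(file) {
  const form = new FormData();
  form.append('file', file, file.name);

  const response = await fetch(AZURE_FUNCTION_URL, {
    method: 'POST',
    body: form
  });

  if (!response.ok) {
    throw new Error(`Upload failed with status ${response.status}`);
  }

  // Assumes the function responds with JSON that includes the blob URL.
  return response.json();
}
```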
@@ -330,4 +332,4 @@ You may also want to: - [Add chat to your app](../quickstarts/chat/get-started.md) - [Creating user access tokens](../quickstarts/access-tokens.md) - [Learn about client and server architecture](../concepts/client-and-server-architecture.md) -- [Learn about authentication](../concepts/authentication.md) +- [Learn about authentication](../concepts/authentication.md) \ No newline at end of file diff --git a/articles/communication-services/tutorials/media/event-management-platform-architecture.svg b/articles/communication-services/tutorials/media/event-management-platform-architecture.svg new file mode 100644 index 000000000000..97c877c73b59 --- /dev/null +++ b/articles/communication-services/tutorials/media/event-management-platform-architecture.svg @@ -0,0 +1,2064 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Export + + + + + Box + text + + + + + + + text + + Sheet.1 + + + + User.2072 + User + + Sheet.3 + + Sheet.4 + + + + + Sheet.5 + + Sheet.6 + + + + + + + User + + + Web App (Was Websites).2077 + Client Application + + Sheet.8 + + Sheet.9 + + + + + Sheet.10 + + Sheet.11 + + Sheet.12 + + Sheet.13 + + + + + Sheet.14 + + + + Sheet.15 + + Sheet.16 + + + + + Sheet.17 + + Sheet.18 + + + + + Sheet.19 + + Sheet.20 + + + + + Sheet.21 + + Sheet.22 + + + + + Sheet.23 + + Sheet.24 + + + + + Sheet.25 + + Sheet.26 + + + + + Sheet.27 + + + + Sheet.28 + + + + + Sheet.29 + + Sheet.30 + + + + + Sheet.31 + + Sheet.32 + + + + + Sheet.33 + + Sheet.34 + + + + + + + + Client Application + + + Dynamic connector.2105 + + + + Sheet.36 + + + + + + Mobile App (Was Mobile Services).2106 + + Sheet.38 + + + + Sheet.39 + + + + + Sheet.40 + 4 + + + + 4 + + Sheet.41 + Azure Communication Services + + + + Azure Communication Services + + User.2111 + Presenter + + Sheet.43 + + Sheet.44 + + + + + Sheet.45 + + Sheet.46 + + + + + + + Presenter + + + Dynamic connector.2116 + + + + Sheet.48 + 5 + + + + 5 + + Teams.2120 + Teams + + + + + Sheet.50 + + Sheet.51 + + + + + + + Sheet.52 + + + + + + + Sheet.53 + + + + + + + Sheet.54 + + + + + + + Sheet.55 + + + + + + + Sheet.56 + + + + + + + Sheet.57 + + + + + + + Sheet.58 + + + + + + + + + + Teams + + + Teams.2130 + M365 Calendar (Graph) + + + + + Sheet.60 + + Sheet.61 + + + + + + + Sheet.62 + + + + + + + Sheet.63 + + + + + + + Sheet.64 + + + + + + + Sheet.65 + + + + + + + Sheet.66 + + + + + + + Sheet.67 + + + + + + + Sheet.68 + + + + + + + + + + M365 Calendar (Graph) + + + Dynamic connector.2140 + + + + Sheet.70 + 3 + + + + 3 + + Dynamic connector.2142 + + + + Sheet.72 + Voice, Video & Text Communication + + + + Voice, Video & Text Communication + + Sheet.73 + Schedule Event + + + + Schedule Event + + Sheet.74 + Start Event + + + + Start Event + + Sheet.75 + Join Event + + + + Join Event + + User.2147 + Organizer + + Sheet.77 + + Sheet.78 + + + + + Sheet.79 + + Sheet.80 + + + + + + + Organizer + + + Sheet.81 + Microsoft Graph + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Microsoft Graph + + Dynamic connector.2152 + + + + + + + + + + + + + + + + + + + Dynamic connector.2153 + + + + + + + + + + + + + + + + + + + Sheet.84 + 1 + + + + 1 + + Sheet.85 + + + + + + Sheet.86 + + + + + + Dynamic connector.2155 + + + + + + + + + + + + + + + + + + + Sheet.88 + Register + + + + Register + + Sheet.89 + 2 + + + + 2 + + Web App (Was Websites).2158 + Client Application + + Sheet.91 + + Sheet.92 + + + + + Sheet.93 + + Sheet.94 + + Sheet.95 + + Sheet.96 + + + + + Sheet.97 + + + + 
Sheet.98 + + Sheet.99 + + + + + Sheet.100 + + Sheet.101 + + + + + Sheet.102 + + Sheet.103 + + + + + Sheet.104 + + Sheet.105 + + + + + Sheet.106 + + Sheet.107 + + + + + Sheet.108 + + Sheet.109 + + + + + Sheet.110 + + + + Sheet.111 + + + + + Sheet.112 + + Sheet.113 + + + + + Sheet.114 + + Sheet.115 + + + + + Sheet.116 + + Sheet.117 + + + + + + + + Client Application + + + Dynamic connector.2186 + + + + Sheet.119 + 2 + + + + 2 + + Sheet.120 + Email remainder + + + + Email remainder + + diff --git a/articles/container-apps/azure-resource-manager-api-spec.md b/articles/container-apps/azure-resource-manager-api-spec.md index 59b61a78bb46..ad7d8de95917 100644 --- a/articles/container-apps/azure-resource-manager-api-spec.md +++ b/articles/container-apps/azure-resource-manager-api-spec.md @@ -5,7 +5,7 @@ services: container-apps author: craigshoemaker ms.service: container-apps ms.topic: reference -ms.date: 05/13/2022 +ms.date: 05/26/2022 ms.author: cshoe ms.custom: ignite-fall-2021, event-tier1-build-2022 --- @@ -230,7 +230,7 @@ The following example ARM template deploys a container app. "name": "[parameters('containerappName')]", "location": "[parameters('location')]", "identity": { - "type": "None" + "type": "None" }, "properties": { "managedEnvironmentId": "[resourceId('Microsoft.App/managedEnvironments', parameters('environment_name'))]", @@ -290,43 +290,44 @@ The following example ARM template deploys a container app. "cpu": 0.5, "memory": "1Gi" }, - "probes":[ + "probes": [ { - "type":"liveness", - "httpGet":{ - "path":"/health", - "port":8080, - "httpHeaders":[ - { - "name":"Custom-Header", - "value":"liveness probe" - }] - }, - "initialDelaySeconds":7, - "periodSeconds":3 + "type": "liveness", + "httpGet": { + "path": "/health", + "port": 8080, + "httpHeaders": [ + { + "name": "Custom-Header", + "value": "liveness probe" + } + ] + }, + "initialDelaySeconds": 7, + "periodSeconds": 3 }, { - "type":"readiness", - "tcpSocket": - { - "port": 8081 - }, - "initialDelaySeconds": 10, - "periodSeconds": 3 + "type": "readiness", + "tcpSocket": { + "port": 8081 + }, + "initialDelaySeconds": 10, + "periodSeconds": 3 }, { - "type": "startup", - "httpGet": { - "path": "/startup", - "port": 8080, - "httpHeaders": [ - { - "name": "Custom-Header", - "value": "startup probe" - }] - }, - "initialDelaySeconds": 3, - "periodSeconds": 3 + "type": "startup", + "httpGet": { + "path": "/startup", + "port": 8080, + "httpHeaders": [ + { + "name": "Custom-Header", + "value": "startup probe" + } + ] + }, + "initialDelaySeconds": 3, + "periodSeconds": 3 } ], "volumeMounts": [ @@ -421,13 +422,13 @@ properties: probes: - type: liveness httpGet: - - path: "/health" - port: 8080 - httpHeaders: - - name: "Custom-Header" - value: "liveness probe" - initialDelaySeconds: 7 - periodSeconds: 3 + path: "/health" + port: 8080 + httpHeaders: + - name: "Custom-Header" + value: "liveness probe" + initialDelaySeconds: 7 + periodSeconds: 3 - type: readiness tcpSocket: - port: 8081 @@ -435,11 +436,11 @@ properties: periodSeconds: 3 - type: startup httpGet: - - path: "/startup" - port: 8080 - httpHeaders: - - name: "Custom-Header" - value: "startup probe" + path: "/startup" + port: 8080 + httpHeaders: + - name: "Custom-Header" + value: "startup probe" initialDelaySeconds: 3 periodSeconds: 3 scale: diff --git a/articles/container-apps/disaster-recovery.md b/articles/container-apps/disaster-recovery.md index 347a1c7d3ceb..b8e52fac3ac8 100644 --- a/articles/container-apps/disaster-recovery.md +++ 
b/articles/container-apps/disaster-recovery.md @@ -19,7 +19,7 @@ In the unlikely event of a full region outage, you have the option of using one - **Manual recovery**: Manually deploy to a new region, or wait for the region to recover, and then manually redeploy all environments and apps. -- **Resilient recovery**: First, deploy your container apps in advance to multiple regions. Next, use Azure Front Door or Azure Traffic Manager to handle incoming requests, pointing traffic to your primary region. Then, should an outage occur, you can redirect traffic away from the affected region. See [Cross-region replication in Azure](/azure/availability-zones/cross-region-replication-azure) for more information. +- **Resilient recovery**: First, deploy your container apps in advance to multiple regions. Next, use Azure Front Door or Azure Traffic Manager to handle incoming requests, pointing traffic to your primary region. Then, should an outage occur, you can redirect traffic away from the affected region. See [Cross-region replication in Azure](../availability-zones/cross-region-replication-azure.md) for more information. > [!NOTE] > Regardless of which strategy you choose, make sure your deployment configuration files are in source control so you can easily redeploy if necessary. @@ -27,4 +27,4 @@ In the unlikely event of a full region outage, you have the option of using one Additionally, the following resources can help you create your own disaster recovery plan: - [Failure and disaster recovery for Azure applications](/azure/architecture/reliability/disaster-recovery) -- [Azure resiliency technical guidance](/azure/architecture/checklist/resiliency-per-service) +- [Azure resiliency technical guidance](/azure/architecture/checklist/resiliency-per-service) \ No newline at end of file diff --git a/articles/container-apps/firewall-integration.md b/articles/container-apps/firewall-integration.md index 6d41af67f5cc..cb64b589f137 100644 --- a/articles/container-apps/firewall-integration.md +++ b/articles/container-apps/firewall-integration.md @@ -14,7 +14,7 @@ ms.author: jennylaw Firewall settings Network Security Groups (NSGs) needed to configure virtual networks closely resemble the settings required by Kubernetes. -Some outbound dependencies of Azure Kubernetes Service (AKS) clusters rely exclusively on fully qualified domain names (FQDN), therefore securing an AKS cluster purely with NSGs isn't possible. Refer to [Control egress traffic for cluster nodes in Azure Kubernetes Service](/azure/aks/limit-egress-traffic) for details. +Some outbound dependencies of Azure Kubernetes Service (AKS) clusters rely exclusively on fully qualified domain names (FQDN), therefore securing an AKS cluster purely with NSGs isn't possible. Refer to [Control egress traffic for cluster nodes in Azure Kubernetes Service](../aks/limit-egress-traffic.md) for details. * You can lock down a network via NSGs with more restrictive rules than the default NSG rules. * To fully secure a cluster, use a combination of NSGs and a firewall. @@ -65,4 +65,4 @@ As the following rules require allowing all IPs, use a Firewall solution to lock | `dc.services.visualstudio.com` | HTTPS | `443` | This endpoint is used for metrics and monitoring using Azure Monitor. | | `*.ods.opinsights.azure.com` | HTTPS | `443` | This endpoint is used by Azure Monitor for ingesting log analytics data. | | `*.oms.opinsights.azure.com` | HTTPS | `443` | This endpoint is used by `omsagent`, which is used to authenticate the log analytics service. 
| -| `*.monitoring.azure.com` | HTTPS | `443` | This endpoint is used to send metrics data to Azure Monitor. | +| `*.monitoring.azure.com` | HTTPS | `443` | This endpoint is used to send metrics data to Azure Monitor. | \ No newline at end of file diff --git a/articles/container-apps/get-started-existing-container-image.md b/articles/container-apps/get-started-existing-container-image.md index d2848e352340..f39eae7b1f31 100644 --- a/articles/container-apps/get-started-existing-container-image.md +++ b/articles/container-apps/get-started-existing-container-image.md @@ -189,7 +189,8 @@ az monitor log-analytics query \ ```powershell $LOG_ANALYTICS_WORKSPACE_CLIENT_ID=(az containerapp env show --name $CONTAINERAPPS_ENVIRONMENT --resource-group $RESOURCE_GROUP --query properties.appLogsConfiguration.logAnalyticsConfiguration.customerId --out tsv) -az monitor log-analytics query \ + +az monitor log-analytics query ` --workspace $LOG_ANALYTICS_WORKSPACE_CLIENT_ID ` --analytics-query "ContainerAppConsoleLogs_CL | where ContainerAppName_s == 'my-container-app' | project ContainerAppName_s, Log_s, TimeGenerated" ` --out table diff --git a/articles/container-apps/microservices-dapr.md b/articles/container-apps/microservices-dapr.md index 4538fbe19a05..08dd31e09298 100644 --- a/articles/container-apps/microservices-dapr.md +++ b/articles/container-apps/microservices-dapr.md @@ -116,11 +116,13 @@ az storage account create \ # [PowerShell](#tab/powershell) -```powershell -New-AzStorageAccount -ResourceGroupName $RESOURCE_GROUP ` - -Name $STORAGE_ACCOUNT ` - -Location $LOCATION ` - -SkuName Standard_RAGRS +```azurecli +az storage account create ` + --name $STORAGE_ACCOUNT ` + --resource-group $RESOURCE_GROUP ` + --location "$LOCATION" ` + --sku Standard_RAGRS ` + --kind StorageV2 ``` --- @@ -135,11 +137,10 @@ STORAGE_ACCOUNT_KEY=`az storage account keys list --resource-group $RESOURCE_GRO # [PowerShell](#tab/powershell) -```powershell -$STORAGE_ACCOUNT_KEY=(Get-AzStorageAccountKey -ResourceGroupName $RESOURCE_GROUP -AccountName $STORAGE_ACCOUNT)| Where-Object -Property KeyName -Contains 'key1' | Select-Object -ExpandProperty Value +```azurecli +$STORAGE_ACCOUNT_KEY=(az storage account keys list --resource-group $RESOURCE_GROUP --account-name $STORAGE_ACCOUNT --query '[0].value' --out tsv) ``` - --- ### Configure the state store component @@ -196,7 +197,7 @@ az containerapp env dapr-component set \ # [PowerShell](#tab/powershell) -```powershell +```azurecli az containerapp env dapr-component set ` --name $CONTAINERAPPS_ENVIRONMENT --resource-group $RESOURCE_GROUP ` --dapr-component-name statestore ` @@ -325,11 +326,14 @@ az monitor log-analytics query \ # [PowerShell](#tab/powershell) -```powershell -$LOG_ANALYTICS_WORKSPACE_CLIENT_ID=(az containerapp env show --name $CONTAINERAPPS_ENVIRONMENT --resource-group $RESOURCE_GROUP --query properties.appLogsConfiguration.logAnalyticsConfiguration.customerId --out tsv) +```azurecli +$LOG_ANALYTICS_WORKSPACE_CLIENT_ID=` +(az containerapp env show --name $CONTAINERAPPS_ENVIRONMENT --resource-group $RESOURCE_GROUP --query properties.appLogsConfiguration.logAnalyticsConfiguration.customerId --out tsv) -$queryResults = Invoke-AzOperationalInsightsQuery -WorkspaceId $LOG_ANALYTICS_WORKSPACE_CLIENT_ID -Query "ContainerAppConsoleLogs_CL | where ContainerAppName_s == 'nodeapp' and (Log_s contains 'persisted' or Log_s contains 'order') | project ContainerAppName_s, Log_s, TimeGenerated | take 5" -$queryResults.Results +az monitor log-analytics query ` + --workspace 
$LOG_ANALYTICS_WORKSPACE_CLIENT_ID ` + --analytics-query "ContainerAppConsoleLogs_CL | where ContainerAppName_s == 'nodeapp' and (Log_s contains 'persisted' or Log_s contains 'order') | project ContainerAppName_s, Log_s, TimeGenerated | take 5" ` + --out table ``` --- @@ -359,8 +363,9 @@ az group delete \ # [PowerShell](#tab/powershell) -```powershell -Remove-AzResourceGroup -Name $RESOURCE_GROUP -Force +```azurecli +az group delete ` + --resource-group $RESOURCE_GROUP ``` --- diff --git a/articles/container-apps/networking.md b/articles/container-apps/networking.md index f552667a33f2..4a1dc7ad8cda 100644 --- a/articles/container-apps/networking.md +++ b/articles/container-apps/networking.md @@ -11,7 +11,7 @@ ms.author: cshoe # Networking architecture in Azure Container Apps -Azure Container Apps run in the context of an [environment](environment.md), which is supported by a virtual network (VNET). When you create an environment, you can provide a custom VNET, otherwise a VNET is automatically generated for you. Generated VNETs are inaccessible to you as they're created in Microsoft's tenent. To take full control over your VNET, provide an existing VNET to Container Apps as you create your environment. +Azure Container Apps run in the context of an [environment](environment.md), which is supported by a virtual network (VNET). When you create an environment, you can provide a custom VNET, otherwise a VNET is automatically generated for you. Generated VNETs are inaccessible to you as they're created in Microsoft's tenant. To take full control over your VNET, provide an existing VNET to Container Apps as you create your environment. The following articles feature step-by-step instructions for creating Container Apps environments with different accessibility levels. @@ -36,7 +36,7 @@ As you create a custom VNET, keep in mind the following situations: - Each [revision](revisions.md) is assigned an IP address in the subnet. - You can restrict inbound requests to the environment exclusively to the VNET by deploying the environment as [internal](vnet-custom-internal.md). -As you begin to design the network around your container app, refer to [Plan virtual networks](/azure/virtual-network/virtual-network-vnet-plan-design-arm) for important concerns surrounding running virtual networks on Azure. +As you begin to design the network around your container app, refer to [Plan virtual networks](../virtual-network/virtual-network-vnet-plan-design-arm.md) for important concerns surrounding running virtual networks on Azure. :::image type="content" source="media/networking/azure-container-apps-virtual-network.png" alt-text="Diagram of how Azure Container Apps environments use an existing V NET, or you can provide your own."::: @@ -97,7 +97,7 @@ Once you're satisfied with the latest revision, you can lock traffic to that rev #### Update existing revision -Consider a situation where you have a known good revision that's serving 100% of your traffic, but you want to issue and update to your app. You can deploy and test new revisions using their direct endpoints without affecting the main revision serving the app. +Consider a situation where you have a known good revision that's serving 100% of your traffic, but you want to issue an update to your app. You can deploy and test new revisions using their direct endpoints without affecting the main revision serving the app. Once you're satisfied with the updated revision, you can shift a portion of traffic to the new revision for testing and verification. 
@@ -197,4 +197,4 @@ When you deploy an internal or an external environment into your own network, a ## Next steps - [Deploy with an external environment](vnet-custom.md) -- [Deploy with an internal environment](vnet-custom-internal.md) +- [Deploy with an internal environment](vnet-custom-internal.md) \ No newline at end of file diff --git a/articles/container-apps/quickstart-code-to-cloud.md b/articles/container-apps/quickstart-code-to-cloud.md index 95941a3d6bc2..6121bd17a9dc 100644 --- a/articles/container-apps/quickstart-code-to-cloud.md +++ b/articles/container-apps/quickstart-code-to-cloud.md @@ -28,7 +28,7 @@ To complete this project, you'll need the following items: | Requirement | Instructions | |--|--| -| Azure account | If you don't have one, [create an account for free](https://azure.microsoft.com/free/?WT.mc_id=A261C142F). You need the *Contributor* or *Owner* permission on the Azure subscription to proceed.

    Refer to [Assign Azure roles using the Azure portal](/azure/role-based-access-control/role-assignments-portal?tabs=current) for details. | +| Azure account | If you don't have one, [create an account for free](https://azure.microsoft.com/free/?WT.mc_id=A261C142F). You need the *Contributor* or *Owner* permission on the Azure subscription to proceed.

    Refer to [Assign Azure roles using the Azure portal](../role-based-access-control/role-assignments-portal.md?tabs=current) for details. | | GitHub Account | Sign up for [free](https://github.com/join). | | git | [Install git](https://git-scm.com/downloads) | | Azure CLI | Install the [Azure CLI](/cli/azure/install-azure-cli).| @@ -39,7 +39,7 @@ To complete this project, you'll need the following items: | Requirement | Instructions | |--|--| -| Azure account | If you don't have one, [create an account for free](https://azure.microsoft.com/free/?WT.mc_id=A261C142F). You need the *Contributor* or *Owner* permission on the Azure subscription to proceed. Refer to [Assign Azure roles using the Azure portal](/azure/role-based-access-control/role-assignments-portal?tabs=current) for details. | +| Azure account | If you don't have one, [create an account for free](https://azure.microsoft.com/free/?WT.mc_id=A261C142F). You need the *Contributor* or *Owner* permission on the Azure subscription to proceed. Refer to [Assign Azure roles using the Azure portal](../role-based-access-control/role-assignments-portal.md?tabs=current) for details. | | GitHub Account | Sign up for [free](https://github.com/join). | | git | [Install git](https://git-scm.com/downloads) | | Azure CLI | Install the [Azure CLI](/cli/azure/install-azure-cli).| @@ -166,7 +166,7 @@ az acr create ` ## Build your application -With [ACR tasks](/azure/container-registry/container-registry-tasks-overview), you can build and push the docker image for the album API without installing Docker locally. +With [ACR tasks](../container-registry/container-registry-tasks-overview.md), you can build and push the docker image for the album API without installing Docker locally. ### Build the container with ACR diff --git a/articles/container-apps/scale-app.md b/articles/container-apps/scale-app.md index 1df7504a8456..beee78717c18 100644 --- a/articles/container-apps/scale-app.md +++ b/articles/container-apps/scale-app.md @@ -20,18 +20,18 @@ There are two scale properties that apply to all rules in your container app: | Scale property | Description | Default value | Min value | Max value | |---|---|---|---|---| -| `minReplicas` | Minimum number of replicas running for your container app. | 0 | 0 | 10 | -| `maxReplicas` | Maximum number of replicas running for your container app. | n/a | 1 | 10 | +| `minReplicas` | Minimum number of replicas running for your container app. | 0 | 0 | 30 | +| `maxReplicas` | Maximum number of replicas running for your container app. | 10 | 1 | 30 | - If your container app scales to zero, then you aren't billed. - Individual scale rules are defined in the `rules` array. - If you want to ensure that an instance of your application is always running, set `minReplicas` to 1 or higher. - Replicas not processing, but that remain in memory are billed in the "idle charge" category. -- Changes to scaling rules are a [revision-scope](overview.md) change. -- When using non-HTTP event scale rules, setting the `properties.configuration.activeRevisionsMode` property of the container app to `single` is recommended. - - - +- Changes to scaling rules are a [revision-scope](revisions.md#revision-scope-changes) change. +- It's recommended to set the `properties.configuration.activeRevisionsMode` property of the container app to `single`, when using non-HTTP event scale rules. +- Container Apps implements the KEDA ScaledObject with the following default settings. 
+ - pollingInterval: 30 seconds + - cooldownPeriod: 300 seconds ## Scale triggers @@ -47,7 +47,7 @@ With an HTTP scaling rule, you have control over the threshold that determines w | Scale property | Description | Default value | Min value | Max value | |---|---|---|---|---| -| `concurrentRequests`| Once the number of requests exceeds this then another replica is added. Replicas will continue to be added up to the `maxReplicas` amount as the number of concurrent requests increase. | 10 | 1 | n/a | +| `concurrentRequests`| When the number of requests exceeds this value, then another replica is added. Replicas will continue to be added up to the `maxReplicas` amount as the number of concurrent requests increase. | 10 | 1 | n/a | In the following example, the container app scales out up to five replicas and can scale down to zero. The scaling threshold is set to 100 concurrent requests per second. @@ -99,13 +99,13 @@ In the following example, the container app scales out up to five replicas and c :::image type="content" source="media/scalers/http-scale-rule.png" alt-text="A screenshot showing how to add an h t t p scale rule."::: -1. Select **Create** when you are done. +1. Select **Create** when you're done. :::image type="content" source="media/scalers/create-http-scale-rule.png" alt-text="A screenshot showing the newly created http scale rule."::: ## Event-driven -Container Apps can scale based of a wide variety of event types. Any event supported by [KEDA](https://keda.sh/docs/scalers/), is supported in Container Apps. +Container Apps can scale based of a wide variety of event types. Any event supported by [KEDA](https://keda.sh/docs/scalers/) is supported in Container Apps. Each event type features different properties in the `metadata` section of the KEDA definition. Use these properties to define a scale rule in Container Apps. @@ -132,7 +132,7 @@ The container app scales according to the following behavior: ... "scale": { "minReplicas": "0", - "maxReplicas": "10", + "maxReplicas": "30", "rules": [ { "name": "queue-based-autoscaling", @@ -162,7 +162,7 @@ To create a custom scale trigger, first create a connection string secret to aut 1. Select **Add**, and then enter your secret key/value information. -1. Select **Add** when you are done. +1. Select **Add** when you're done. :::image type="content" source="media/scalers/connection-string.png" alt-text="A screenshot showing how to create a connection string."::: @@ -178,11 +178,11 @@ To create a custom scale trigger, first create a connection string secret to aut :::image type="content" source="media/scalers/add-scale-rule.png" alt-text="A screenshot showing how to add a scale rule."::: -1. Enter a **Rule name**, select **Custom** and enter a **Custom rule type**. Enter your **Secret reference** and **Trigger parameter** and then add your **Metadata** parameters. select **Add** when you are done. +1. Enter a **Rule name**, select **Custom** and enter a **Custom rule type**. Enter your **Secret reference** and **Trigger parameter** and then add your **Metadata** parameters. select **Add** when you're done. :::image type="content" source="media/scalers/custom-scaler.png" alt-text="A screenshot showing how to configure a custom scale rule."::: -1. Select **Create** when you are done. +1. Select **Create** when you're done. > [!NOTE] > In multiple revision mode, adding a new scale trigger creates a new revision of your application but your old revision remains available with the old scale rules. 
Use the **Revision management** page to manage their traffic allocations. @@ -207,7 +207,7 @@ Azure Container Apps supports KEDA ScaledObjects and all of the available [KEDA ... "scale": { "minReplicas": "0", - "maxReplicas": "10", + "maxReplicas": "30", "rules": [ { "name": "", @@ -224,9 +224,9 @@ Azure Container Apps supports KEDA ScaledObjects and all of the available [KEDA } ``` -The following is an example of setting up an [Azure Storage Queue](https://keda.sh/docs/scalers/azure-storage-queue/) scaler that you can configure to auto scale based on Azure Storage Queues. +The following YAML is an example of setting up an [Azure Storage Queue](https://keda.sh/docs/scalers/azure-storage-queue/) scaler that you can configure to auto scale based on Azure Storage Queues. -Below is the KEDA trigger specification for an Azure Storage Queue. To set up a scale rule in Azure Container Apps, you will need the trigger `type` and any other required parameters. You can also add other optional parameters which vary based on the scaler you are using. +Below is the KEDA trigger specification for an Azure Storage Queue. To set up a scale rule in Azure Container Apps, you'll need the trigger `type` and any other required parameters. You can also add other optional parameters, which vary based on the scaler you're using. In this example, you need the `accountName` and the name of the cloud environment that the queue belongs to `cloud` to set up your scaler in Azure Container Apps. @@ -259,7 +259,7 @@ Now your JSON config file should look like this: ... "scale": { "minReplicas": "0", - "maxReplicas": "10", + "maxReplicas": "30", "rules": [ { "name": "queue-trigger", @@ -279,7 +279,7 @@ Now your JSON config file should look like this: ``` > [!NOTE] -> KEDA ScaledJobs are not supported. See [KEDA scaling Jobs](https://keda.sh/docs/concepts/scaling-jobs/#overview) for more details. +> KEDA ScaledJobs are not supported. For more information, see [KEDA Scaling Jobs](https://keda.sh/docs/concepts/scaling-jobs/#overview). ## CPU @@ -359,12 +359,11 @@ The following example shows how to create a memory scaling rule. ## Considerations -- Vertical scaling is not supported. +- Vertical scaling isn't supported. - Replica quantities are a target amount, not a guarantee. - - Even if you set `maxReplicas` to `1`, there is no assurance of thread safety. - -- If you are using [Dapr actors](https://docs.dapr.io/developing-applications/building-blocks/actors/actors-overview/) to manage states, you should keep in mind that scaling to zero is not supported. Dapr uses virtual actors to manage asynchronous calls which means their in-memory representation is not tied to their identity or lifetime. + +- If you're using [Dapr actors](https://docs.dapr.io/developing-applications/building-blocks/actors/actors-overview/) to manage states, you should keep in mind that scaling to zero isn't supported. Dapr uses virtual actors to manage asynchronous calls, which means their in-memory representation isn't tied to their identity or lifetime. ## Next steps diff --git a/articles/container-apps/storage-mounts.md b/articles/container-apps/storage-mounts.md index a46f69a59a9e..461fbfa24323 100644 --- a/articles/container-apps/storage-mounts.md +++ b/articles/container-apps/storage-mounts.md @@ -20,6 +20,9 @@ A container app has access to different types of storage. A single app can take | [Temporary storage](#temporary-storage) | Temporary storage scoped to an individual replica | Sharing files between containers in a replica. 
For instance, the main app container can write log files that are processed by a sidecar container. | | [Azure Files](#azure-files) | Permanent storage | Writing files to a file share to make data accessible by other systems. | +> [!NOTE] +> The volume mounting features in Azure Container Apps are in preview. + ## Container file system A container can write to its own file system. @@ -160,7 +163,7 @@ See the [ARM template API specification](azure-resource-manager-api-spec.md) for ## Azure Files -You can mount a file share from [Azure Files](/azure/storage/files/) as a volume inside a container. +You can mount a file share from [Azure Files](../storage/files/index.yml) as a volume inside a container. Azure Files storage has the following characteristics: @@ -181,7 +184,7 @@ To enable Azure Files storage in your container, you need to set up your contain | Requirement | Instructions | |--|--| | Azure account | If you don't have one, [create an account for free](https://azure.microsoft.com/free/?WT.mc_id=A261C142F). | -| Azure Storage account | [Create a storage account](/azure/storage/common/storage-account-create?tabs=azure-cli#create-a-storage-account-1). | +| Azure Storage account | [Create a storage account](../storage/common/storage-account-create.md?tabs=azure-cli#create-a-storage-account-1). | | Azure Container Apps environment | [Create a container apps environment](environment.md). | ### Configuration @@ -346,4 +349,4 @@ The following ARM template snippets demonstrate how to add an Azure Files share See the [ARM template API specification](azure-resource-manager-api-spec.md) for a full example. -::: zone-end +::: zone-end \ No newline at end of file diff --git a/articles/container-instances/availability-zones.md b/articles/container-instances/availability-zones.md index 94d30ee12cda..217d26a59f1e 100644 --- a/articles/container-instances/availability-zones.md +++ b/articles/container-instances/availability-zones.md @@ -8,7 +8,7 @@ ms.custom: devx-track-js, devx-track-azurecli # Deploy an Azure Container Instances (ACI) container group in an availability zone (preview) -An [availability zone][availability-zone-overview] is a physically separate zone in an Azure region. You can use availability zones to protect your containerized applications from an unlikely failure or loss of an entire data center. Three types of Azure services support availability zones: *zonal*, *zone-redundant*, and *always-available* services. You can learn more about these types of services and how they promote resiliency in the [Highly available services section of Azure services that support availability zones](/azure/availability-zones/az-region#highly-available-services). +An [availability zone][availability-zone-overview] is a physically separate zone in an Azure region. You can use availability zones to protect your containerized applications from an unlikely failure or loss of an entire data center. Three types of Azure services support availability zones: *zonal*, *zone-redundant*, and *always-available* services. You can learn more about these types of services and how they promote resiliency in the [Highly available services section of Azure services that support availability zones](../availability-zones/az-region.md#highly-available-services). Azure Container Instances (ACI) supports *zonal* container group deployments, meaning the instance is pinned to a specific, self-selected availability zone. The availability zone is specified at the container group level. 
Containers within a container group can't have unique availability zones. To change your container group's availability zone, you must delete the container group and create another container group with the new availability zone. diff --git a/articles/container-instances/container-instances-troubleshooting.md b/articles/container-instances/container-instances-troubleshooting.md index 0db865054dab..8c6f5f358fcd 100644 --- a/articles/container-instances/container-instances-troubleshooting.md +++ b/articles/container-instances/container-instances-troubleshooting.md @@ -94,7 +94,7 @@ This error indicates that due to heavy load in the region in which you are attem ## Issues during container group runtime ### Container had an isolated restart without explicit user input -There are two broad categories for why a container group may restart without explicit user input. First, containers may experience restarts caused by an application process crash. The ACI service recommends leveraging observability solutions such as Application Insights SDK, container group metrics, and container group logs to determine why the application experienced issues. Second, customers may experience restarts initiated by the ACI infrastructure due to maintenance events. To increase the availability of your application, run multiple container groups behind an ingress component such as an Application Gateway or Traffic Manager. +There are two broad categories for why a container group may restart without explicit user input. First, containers may experience restarts caused by an application process crash. The ACI service recommends leveraging observability solutions such as [Application Insights SDK](../azure-monitor/app/app-insights-overview.md), [container group metrics](container-instances-monitor.md), and [container group logs](container-instances-get-logs.md) to determine why the application experienced issues. Second, customers may experience restarts initiated by the ACI infrastructure due to maintenance events. To increase the availability of your application, run multiple container groups behind an ingress component such as an [Application Gateway](../application-gateway/overview.md) or [Traffic Manager](../traffic-manager/traffic-manager-overview.md). ### Container continually exits and restarts (no long-running process) diff --git a/articles/container-registry/container-registry-check-health.md b/articles/container-registry/container-registry-check-health.md index bc040e32eed5..82a15bfc4f69 100644 --- a/articles/container-registry/container-registry-check-health.md +++ b/articles/container-registry/container-registry-check-health.md @@ -79,6 +79,10 @@ Fetch refresh token for registry 'myregistry.azurecr.io' : OK Fetch access token for registry 'myregistry.azurecr.io' : OK ``` +## Check if registry is configured with quarantine + +Once you enable a container registry to be quarantined, every image you publish to this repository will be quarantined. Any attempts to access or pull quarantined images will fail with an error. For more information, See [pull the quarantine image](https://github.com/Azure/acr/tree/main/docs/preview/quarantine#pull-the-quarantined-image). + ## Next steps For details about error codes returned by the [az acr check-health][az-acr-check-health] command, see the [Health check error reference](container-registry-health-error-reference.md). 
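As a quick illustration, a typical run of the health check looks like the following sketch; `myregistry` is a placeholder registry name, and the optional `--ignore-errors` flag keeps the command reporting the remaining checks instead of stopping at the first failure:

```azurecli
# Run environment, DNS, and authentication checks against the registry.
az acr check-health --name myregistry --ignore-errors
```

The output lists each check with an `OK` status or an error code that you can look up in the health check error reference.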
diff --git a/articles/container-registry/container-registry-faq.yml b/articles/container-registry/container-registry-faq.yml index 0566faca45f0..8314af76378d 100644 --- a/articles/container-registry/container-registry-faq.yml +++ b/articles/container-registry/container-registry-faq.yml @@ -550,9 +550,9 @@ sections: - question: | How to resolve if the Agent pool creation fails due to timeout issues? answer: | - Set up the correct [firewalls rules](/azure/container-registry/tasks-agent-pools#add-firewall-rules) to the existing network security groups or user-defined routes. After the setup, wait a few minutes for the firewall rules to apply. + Set up the correct [firewalls rules](./tasks-agent-pools.md#add-firewall-rules) to the existing network security groups or user-defined routes. After the setup, wait a few minutes for the firewall rules to apply. additionalContent: | ## Next steps - * [Learn more](container-registry-intro.md) about Azure Container Registry. + * [Learn more](container-registry-intro.md) about Azure Container Registry. \ No newline at end of file diff --git a/articles/container-registry/container-registry-private-link.md b/articles/container-registry/container-registry-private-link.md index e8736fb7feed..c4dc9a67bf5b 100644 --- a/articles/container-registry/container-registry-private-link.md +++ b/articles/container-registry/container-registry-private-link.md @@ -332,11 +332,11 @@ az acr update --name $REGISTRY_NAME --public-network-enabled false Consider the following options to execute the `az acr build` successfully. > [!NOTE] -> Once you disable public network [access here](/azure/container-registry/container-registry-private-link#disable-public-access), then `az acr build` commands will no longer work. +> Once you disable public network [access here](#disable-public-access), then `az acr build` commands will no longer work. -1. Assign a [dedicated agent pool.](/azure/container-registry/tasks-agent-pools#Virtual-network-support) -2. If agent pool is not available in the region, add the regional [Azure Container Registry Service Tag IPv4](/azure/virtual-network/service-tags-overview#use-the-service-tag-discovery-api) to the [firewall access rules.](/azure/container-registry/container-registry-firewall-access-rules#allow-access-by-ip-address-range) -3. Create an ACR task with a managed identity, and enable trusted services to [access network restricted ACR.](/azure/container-registry/allow-access-trusted-services#example-acr-tasks) +1. Assign a [dedicated agent pool.](./tasks-agent-pools.md) +2. If agent pool is not available in the region, add the regional [Azure Container Registry Service Tag IPv4](../virtual-network/service-tags-overview.md#use-the-service-tag-discovery-api) to the [firewall access rules.](./container-registry-firewall-access-rules.md#allow-access-by-ip-address-range) +3. 
Create an ACR task with a managed identity, and enable trusted services to [access network restricted ACR.](./allow-access-trusted-services.md#example-acr-tasks) ## Validate private link connection diff --git a/articles/container-registry/container-registry-support-policies.md b/articles/container-registry/container-registry-support-policies.md index 0e4e3adfa559..d06e4d5db9fe 100644 --- a/articles/container-registry/container-registry-support-policies.md +++ b/articles/container-registry/container-registry-support-policies.md @@ -18,7 +18,7 @@ This article provides details about Azure Container Registry (ACR) support polic >* [Encrypt using Customer managed keys](container-registry-customer-managed-keys.md) >* [Enable Content trust](container-registry-content-trust.md) >* [Scan Images using Azure Security Center](../defender-for-cloud/defender-for-container-registries-introduction.md) ->* [ACR Tasks](/azure/container-registry/container-registry-tasks-overview) +>* [ACR Tasks](./container-registry-tasks-overview.md) >* [Import container images to ACR](container-registry-import-images.md) >* [Image locking in ACR](container-registry-image-lock.md) >* [Synchronize content with ACR using Connected Registry](intro-connected-registry.md) @@ -68,4 +68,4 @@ This article provides details about Azure Container Registry (ACR) support polic ## Upstream bugs The ACR support will identify the root cause of every issue raised. The team will report all the identified bugs as an [issue in the ACR repository](https://github.com/Azure/acr/issues) with supporting details. The engineering team will review and provide a workaround solution, bug fix, or upgrade with a new release timeline. All the bug fixes integrate from upstream. -Customers can watch the issues, bug fixes, add more details, and follow the new releases. +Customers can watch the issues, bug fixes, add more details, and follow the new releases. 
\ No newline at end of file diff --git a/articles/cosmos-db/.openpublishing.redirection.cosmos-db.json b/articles/cosmos-db/.openpublishing.redirection.cosmos-db.json index b10c325d9156..a29e3a786bd7 100644 --- a/articles/cosmos-db/.openpublishing.redirection.cosmos-db.json +++ b/articles/cosmos-db/.openpublishing.redirection.cosmos-db.json @@ -3625,6 +3625,16 @@ "source_path_from_root": "/articles/cosmos-db/sql/advanced-threat-protection.md", "redirect_url": "/azure/cosmos-db/sql/defender-for-cosmos-db", "redirect_document_id": false - } + }, + { + "source_path_from_root": "/articles/cosmos-db/how-to-container-copy.md", + "redirect_url": "/azure/cosmos-db/", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/cosmos-db/intra-account-container-copy.md", + "redirect_url": "/azure/cosmos-db/", + "redirect_document_id": false + }, ] } diff --git a/articles/cosmos-db/TOC.yml b/articles/cosmos-db/TOC.yml index e4826934d89e..e5036dbbf4b7 100644 --- a/articles/cosmos-db/TOC.yml +++ b/articles/cosmos-db/TOC.yml @@ -160,8 +160,6 @@ href: data-residency.md - name: Automatic recommendations href: automated-recommendations.md - - name: Move data within Azure Cosmos DB - href: intra-account-container-copy.md - name: SQL/Core API expanded: true items: @@ -1599,9 +1597,6 @@ - name: Limit total account throughput displayName: cost, RUs, RU href: limit-total-account-throughput.md - - name: Move data within Cosmos DB - displayName: Move data within Cosmos DB - href: how-to-container-copy.md - name: Access preview features href: access-previews.md - name: Manage Azure Cosmos DB resources diff --git a/articles/cosmos-db/audit-restore-continuous.md b/articles/cosmos-db/audit-restore-continuous.md index 5fedbf7e5604..cb4d87d08449 100644 --- a/articles/cosmos-db/audit-restore-continuous.md +++ b/articles/cosmos-db/audit-restore-continuous.md @@ -12,7 +12,7 @@ ms.reviewer: wiassaf # Audit the point in time restore action for continuous backup mode in Azure Cosmos DB [!INCLUDE[appliesto-all-apis-except-cassandra](includes/appliesto-all-apis-except-cassandra.md)] -Azure Cosmos DB provides you the list of all the point in time restores for continuous mode that were performed on a Cosmos DB account using [Activity Logs](/azure/azure-monitor/essentials/activity-log). Activity logs can be viewed for any Cosmos DB account from the **Activity Logs** page in the Azure portal. The Activity Log shows all the operations that were triggered on the specific account. When a point in time restore is triggered, it shows up as `Restore Database Account` operation on the source account as well as the target account. The Activity Log for the source account can be used to audit restore events, and the activity logs on the target account can be used to get the updates about the progress of the restore. +Azure Cosmos DB provides you the list of all the point in time restores for continuous mode that were performed on a Cosmos DB account using [Activity Logs](../azure-monitor/essentials/activity-log.md). Activity logs can be viewed for any Cosmos DB account from the **Activity Logs** page in the Azure portal. The Activity Log shows all the operations that were triggered on the specific account. When a point in time restore is triggered, it shows up as `Restore Database Account` operation on the source account as well as the target account. 
The Activity Log for the source account can be used to audit restore events, and the activity logs on the target account can be used to get the updates about the progress of the restore. ## Audit the restores that were triggered on a live database account @@ -32,7 +32,7 @@ For the accounts that were already deleted, there would not be any database acco :::image type="content" source="media/restore-account-continuous-backup/continuous-backup-restore-details-deleted-json.png" alt-text="Azure Cosmos DB restore audit activity log." lightbox="media/restore-account-continuous-backup/continuous-backup-restore-details-deleted-json.png"::: -The activity logs can also be accessed using Azure CLI or Azure PowerShell. For more information on activity logs, review [Azure Activity log - Azure Monitor](/azure/azure-monitor/essentials/activity-log). +The activity logs can also be accessed using Azure CLI or Azure PowerShell. For more information on activity logs, review [Azure Activity log - Azure Monitor](../azure-monitor/essentials/activity-log.md). ## Track the progress of the restore operation @@ -48,4 +48,4 @@ The account status would be *Creating*, but it would have an Activity Log page. * Provision an account with continuous backup by using the [Azure portal](provision-account-continuous-backup.md#provision-portal), [PowerShell](provision-account-continuous-backup.md#provision-powershell), the [Azure CLI](provision-account-continuous-backup.md#provision-cli), or [Azure Resource Manager](provision-account-continuous-backup.md#provision-arm-template). * [Manage permissions](continuous-backup-restore-permissions.md) required to restore data with continuous backup mode. * Learn about the [resource model of continuous backup mode](continuous-backup-restore-resource-model.md). - * Explore the [Frequently asked questions for continuous mode](continuous-backup-restore-frequently-asked-questions.yml). + * Explore the [Frequently asked questions for continuous mode](continuous-backup-restore-frequently-asked-questions.yml). \ No newline at end of file diff --git a/articles/cosmos-db/continuous-backup-restore-introduction.md b/articles/cosmos-db/continuous-backup-restore-introduction.md index 495d0bc692b9..036d711c470c 100644 --- a/articles/cosmos-db/continuous-backup-restore-introduction.md +++ b/articles/cosmos-db/continuous-backup-restore-introduction.md @@ -102,7 +102,7 @@ For example, if you have 1 TB of data in two regions then: * Restore cost is calculated as (1000 * 0.15) = $150 per restore > [!TIP] -> For more information about measuring the current data usage of your Azure Cosmos DB account, see [Explore Azure Monitor Cosmos DB insights](/azure/azure-monitor/insights/cosmosdb-insights-overview#view-utilization-and-performance-metrics-for-azure-cosmos-db). +> For more information about measuring the current data usage of your Azure Cosmos DB account, see [Explore Azure Monitor Cosmos DB insights](../azure-monitor/insights/cosmosdb-insights-overview.md#view-utilization-and-performance-metrics-for-azure-cosmos-db). ## Customer-managed keys @@ -152,4 +152,4 @@ Currently the point in time restore functionality has the following limitations: * Restore continuous backup account using [Azure portal](restore-account-continuous-backup.md#restore-account-portal), [PowerShell](restore-account-continuous-backup.md#restore-account-powershell), [CLI](restore-account-continuous-backup.md#restore-account-cli), or [Azure Resource Manager](restore-account-continuous-backup.md#restore-arm-template). 
* [Migrate to an account from periodic backup to continuous backup](migrate-continuous-backup.md). * [Manage permissions](continuous-backup-restore-permissions.md) required to restore data with continuous backup mode. -* [Resource model of continuous backup mode](continuous-backup-restore-resource-model.md) +* [Resource model of continuous backup mode](continuous-backup-restore-resource-model.md) \ No newline at end of file diff --git a/articles/cosmos-db/hierarchical-partition-keys.md b/articles/cosmos-db/hierarchical-partition-keys.md index e467a6b35bc1..e23a50b45b51 100644 --- a/articles/cosmos-db/hierarchical-partition-keys.md +++ b/articles/cosmos-db/hierarchical-partition-keys.md @@ -397,7 +397,7 @@ You can test the subpartitioning feature using the latest version of the local e .\CosmosDB.Emulator.exe /EnablePreview ``` -For more information, see [Azure Cosmos DB emulator](/azure/cosmos-db/local-emulator). +For more information, see [Azure Cosmos DB emulator](./local-emulator.md). ## Limitations and known issues @@ -414,4 +414,4 @@ For more information, see [Azure Cosmos DB emulator](/azure/cosmos-db/local-emul * See the FAQ on [hierarchical partition keys.](hierarchical-partition-keys-faq.yml) * Learn more about [partitioning in Azure Cosmos DB.](partitioning-overview.md) -* Learn more about [using Azure Resource Manager templates with Azure Cosmos DB.](/azure/templates/microsoft.documentdb/databaseaccounts) +* Learn more about [using Azure Resource Manager templates with Azure Cosmos DB.](/azure/templates/microsoft.documentdb/databaseaccounts) \ No newline at end of file diff --git a/articles/cosmos-db/how-to-container-copy.md b/articles/cosmos-db/how-to-container-copy.md deleted file mode 100644 index d94282e22cfe..000000000000 --- a/articles/cosmos-db/how-to-container-copy.md +++ /dev/null @@ -1,103 +0,0 @@ ---- -title: Create and manage intra-account container copy jobs in Azure Cosmos DB -description: Learn how to create, monitor, and manage container copy jobs within an Azure Cosmos DB account using CLI commands. -author: nayakshweta -ms.service: cosmos-db -ms.topic: how-to -ms.date: 04/18/2022 -ms.author: shwetn ---- - -# Create and manage intra-account container copy jobs in Azure Cosmos DB (Preview) -[!INCLUDE[appliesto-sql-cassandra-api](includes/appliesto-sql-cassandra-api.md)] - -[Container copy jobs](intra-account-container-copy.md) creates offline copies of collections within an Azure Cosmos DB account. - -This article describes how to create, monitor, and manage intra-account container copy jobs using Azure CLI commands. - -## Set shell variables - -First, set all of the variables that each individual script will use. 
- -```azurecli-interactive -$accountName = "" -$resourceGroup = "" -$jobName = "" -$sourceDatabase = "" -$sourceContainer = "" -$destinationDatabase = "" -$destinationContainer = "" -``` - -## Create an intra-account container copy job for SQL API account - -Create a job to copy a container within an Azure Cosmos DB SQL API account: - -```azurecli-interactive -az cosmosdb dts copy \ - --resource-group $resourceGroup \ - --job-name $jobName \ - --account-name $accountName \ - --source-sql-container database=$sourceDatabase container=$sourceContainer \ - --dest-sql-container database=$destinationDatabase container=$destinationContainer -``` - -## Create intra-account container copy job for Cassandra API account - -Create a job to copy a container within an Azure Cosmos DB Cassandra API account: - -```azurecli-interactive -az cosmosdb dts copy \ - --resource-group $resourceGroup \ - --job-name $jobName \ - --account-name $accountName \ - --source-cassandra-table keyspace=$sourceKeySpace table=$sourceTable \ - --dest-cassandra-table keyspace=$destinationKeySpace table=$destinationTable -``` - -## Monitor the progress of a container copy job - -View the progress and status of a copy job: - -```azurecli-interactive -az cosmosdb dts show \ - --account-name $accountName \ - --resource-group $resourceGroup \ - --job-name $jobName -``` - -## List all the container copy jobs created in an account - -To list all the container copy jobs created in an account: - -```azurecli-interactive -az cosmosdb dts list \ - --account-name $accountName \ - --resource-group $resourceGroup -``` - -## Pause a container copy job - -In order to pause an ongoing container copy job, you may use the command: - -```azurecli-interactive -az cosmosdb dts pause \ - --account-name $accountName \ - --resource-group $resourceGroup \ - --job-name $jobName -``` - -## Resume a container copy job - -In order to resume an ongoing container copy job, you may use the command: - -```azurecli-interactive -az cosmosdb dts resume \ - --account-name $accountName \ - --resource-group $resourceGroup \ - --job-name $jobName -``` - -## Next steps - -- For more information about intra-account container copy jobs, see [Container copy jobs](intra-account-container-copy.md). diff --git a/articles/cosmos-db/includes/appliesto-sql-cassandra-api.md b/articles/cosmos-db/includes/appliesto-sql-cassandra-api.md deleted file mode 100644 index ce57fb48aea5..000000000000 --- a/articles/cosmos-db/includes/appliesto-sql-cassandra-api.md +++ /dev/null @@ -1 +0,0 @@ -APPLIES TO: :::image type="icon" source="../media/applies-to/yes.png" border="false":::SQL API :::image type="icon" source="../media/applies-to/yes.png" border="false":::Cassandra API \ No newline at end of file diff --git a/articles/cosmos-db/intra-account-container-copy.md b/articles/cosmos-db/intra-account-container-copy.md deleted file mode 100644 index fe4fc3e58de2..000000000000 --- a/articles/cosmos-db/intra-account-container-copy.md +++ /dev/null @@ -1,95 +0,0 @@ ---- -title: Intra-account container copy jobs in Azure Cosmos DB -description: Learn about container data copy capability within an Azure Cosmos DB account. -author: nayakshweta -ms.service: cosmos-db -ms.topic: conceptual -ms.date: 04/18/2022 -ms.author: shwetn ---- - -# Intra-account container copy jobs in Azure Cosmos DB (Preview) -[!INCLUDE[appliesto-sql-cassandra-api](includes/appliesto-sql-cassandra-api.md)] - -You can perform offline container copy within an Azure Cosmos DB account using container copy jobs. 
- -You may need to copy data within your Azure Cosmos DB account if you want to achieve any of these scenarios: - -* Copy all items from one container to another. -* Change the [granularity at which throughput is provisioned - from database to container](set-throughput.md) and vice-versa. -* Change the [partition key](partitioning-overview.md#choose-partitionkey) of a container. -* Update the [unique keys](unique-keys.md) for a container. -* Rename a container/database. -* Adopt new features that are only supported on new containers. - -Intra-account container copy jobs can be currently [created and managed using CLI commands](how-to-container-copy.md). - -## Get started - -To get started using container copy jobs, enroll in the preview by filing a support ticket in the [Azure portal](https://portal.azure.com). - -## How does intra-account container copy work? - -Intra-account container copy jobs perform offline data copy using the source container's incremental change feed log. - -* Within the platform, we allocate two 4-vCPU 16-GB memory server-side compute instances per Azure Cosmos DB account by default. -* The instances are allocated when one or more container copy jobs are created within the account. -* The container copy jobs run on these instances. -* The instances are shared by all the container copy jobs running within the same account. -* The platform may de-allocate the instances if they're idle for >15 mins. - -> [!NOTE] -> We currently only support offline container copy. So, we strongly recommend to stop performing any operations on the source container prior to beginning the container copy. -> Item deletions and updates done on the source container after beginning the copy job may not be captured. Hence, continuing to perform operations on the source container while the container job is in progress may result in data missing on the target container. - -## Overview of steps needed to do a container copy - -1. Stop the operations on the source container by pausing the application instances or any clients connecting to it. -2. [Create the container copy job](how-to-container-copy.md). -3. [Monitor the progress of the container copy job](how-to-container-copy.md#monitor-the-progress-of-a-container-copy-job) and wait until it's completed. -4. Resume the operations by appropriately pointing the application or client to the source or target container copy as intended. - -## Factors affecting the rate of container copy job - -The rate of container copy job progress is determined by these factors: - -* Source container/database throughput setting. - -* Target container/database throughput setting. - -* Server-side compute instances allocated to the Azure Cosmos DB account for the performing the data transfer. - - > [!IMPORTANT] - > The default SKU offers two 4-vCPU 16-GB server-side instances per account. You may opt to sign up for [larger SKUs](#large-skus-preview) in preview. - -## FAQs - -### Is there an SLA for the container copy jobs? - -Container copy jobs are currently supported on best-effort basis. We don't provide any SLA guarantees for the time taken to complete these jobs. - -### Can I create multiple container copy jobs within an account? - -Yes, you can create multiple jobs within the same account. The jobs will run consecutively. You can [list all the jobs](how-to-container-copy.md#list-all-the-container-copy-jobs-created-in-an-account) created within an account and monitor their progress. - -### Can I copy an entire database within the Azure Cosmos DB account? 
- -You'll have to create a job for each collection in the database. - -### I have an Azure Cosmos DB account with multiple regions. In which region will the container copy job run? - -The container copy job will run in the write region. If there are accounts configured with multi-region writes, the job will run in one of the regions from the list. - -### What happens to the container copy jobs when the account's write region changes? - -The account's write region may change in the rare scenario of a region outage or due to manual failover. In such scenario, incomplete container copy jobs created within the account would fail. You would need to recreate such jobs. Recreated jobs would then run against the new (current) write region. - -## Large SKUs preview - -If you want to run the container copy jobs faster, you may do so by adjusting one of the [factors that affect the rate of the copy job](#factors-affecting-the-rate-of-container-copy-job). In order to adjust the configuration of the server-side compute instances, you may sign up for "Large SKU support for container copy" preview. - -This preview will allow you to choose larger a SKU size for the server-side instances. Large SKU sizes are billable at a higher rate. You can also choose a node count of up to 5 of these instances. - -## Next Steps - -- You can learn about [how to create, monitor and manage container copy jobs within Azure Cosmos DB account using CLI commands](how-to-container-copy.md). diff --git a/articles/cosmos-db/monitor-cosmos-db.md b/articles/cosmos-db/monitor-cosmos-db.md index c51f54c4e5d2..8efaff0fb3fd 100644 --- a/articles/cosmos-db/monitor-cosmos-db.md +++ b/articles/cosmos-db/monitor-cosmos-db.md @@ -135,14 +135,14 @@ Azure Cosmos DB stores data in the following tables. ### Sample Kusto queries -Prior to using Log Analytics to issue Kusto queries, you must [enable diagnostic logs for control plane operations](/azure/cosmos-db/audit-control-plane-logs#enable-diagnostic-logs-for-control-plane-operations). When enabling diagnostic logs, you will select between storing your data in a single [AzureDiagnostics table (legacy)](/azure/azure-monitor/essentials/resource-logs#azure-diagnostics-mode) or [resource-specific tables](/azure/azure-monitor/essentials/resource-logs#resource-specific). +Prior to using Log Analytics to issue Kusto queries, you must [enable diagnostic logs for control plane operations](./audit-control-plane-logs.md#enable-diagnostic-logs-for-control-plane-operations). When enabling diagnostic logs, you will select between storing your data in a single [AzureDiagnostics table (legacy)](../azure-monitor/essentials/resource-logs.md#azure-diagnostics-mode) or [resource-specific tables](../azure-monitor/essentials/resource-logs.md#resource-specific). When you select **Logs** from the Azure Cosmos DB menu, Log Analytics is opened with the query scope set to the current Azure Cosmos DB account. Log queries will only include data from that resource. > [!IMPORTANT] > If you want to run a query that includes data from other accounts or data from other Azure services, select **Logs** from the **Azure Monitor** menu. For more information, see [Log query scope and time range in Azure Monitor Log Analytics](../azure-monitor/logs/scope.md). -Here are some queries that you can enter into the **Log search** search bar to help you monitor your Azure Cosmos resources. 
The exact text of the queries will depend on the [collection mode](/azure/azure-monitor/essentials/resource-logs#select-the-collection-mode) you selected when you enabled diagnostics logs. +Here are some queries that you can enter into the **Log search** search bar to help you monitor your Azure Cosmos resources. The exact text of the queries will depend on the [collection mode](../azure-monitor/essentials/resource-logs.md#select-the-collection-mode) you selected when you enabled diagnostics logs. #### [AzureDiagnostics table (legacy)](#tab/azure-diagnostics) @@ -270,4 +270,4 @@ To learn more, see the [Azure monitoring REST API](../azure-monitor/essentials/r ## Next steps * See [Azure Cosmos DB monitoring data reference](monitor-cosmos-db-reference.md) for a reference of the logs and metrics created by Azure Cosmos DB. -* See [Monitoring Azure resources with Azure Monitor](../azure-monitor/essentials/monitor-azure-resource.md) for details on monitoring Azure resources. +* See [Monitoring Azure resources with Azure Monitor](../azure-monitor/essentials/monitor-azure-resource.md) for details on monitoring Azure resources. \ No newline at end of file diff --git a/articles/cosmos-db/scripts/cli/cassandra/autoscale.md b/articles/cosmos-db/scripts/cli/cassandra/autoscale.md index 27c88719ff85..04cd1da13336 100644 --- a/articles/cosmos-db/scripts/cli/cassandra/autoscale.md +++ b/articles/cosmos-db/scripts/cli/cassandra/autoscale.md @@ -22,7 +22,7 @@ The script in this article creates an Azure Cosmos DB Cassandra API account, key - This script requires Azure CLI version 2.12.1 or later. - - You can run the script in the Bash environment in [Azure Cloud Shell](/azure/cloud-shell/quickstart). When Cloud Shell opens, make sure to select **Bash** in the environment field at the upper left of the shell window. Cloud Shell has the latest version of Azure CLI. + - You can run the script in the Bash environment in [Azure Cloud Shell](../../../../cloud-shell/quickstart.md). When Cloud Shell opens, make sure to select **Bash** in the environment field at the upper left of the shell window. Cloud Shell has the latest version of Azure CLI. [![Launch Cloud Shell in a new window](../../../../../includes/media/cloud-shell-try-it/hdi-launch-cloud-shell.png)](https://shell.azure.com) @@ -49,4 +49,4 @@ az group delete --name $resourceGroup ## Next steps -[Azure Cosmos DB CLI documentation](/cli/azure/cosmosdb) +[Azure Cosmos DB CLI documentation](/cli/azure/cosmosdb) \ No newline at end of file diff --git a/articles/cosmos-db/scripts/cli/gremlin/autoscale.md b/articles/cosmos-db/scripts/cli/gremlin/autoscale.md index 8e43acf9c6fb..8df89fc56ed9 100644 --- a/articles/cosmos-db/scripts/cli/gremlin/autoscale.md +++ b/articles/cosmos-db/scripts/cli/gremlin/autoscale.md @@ -22,7 +22,7 @@ The script in this article creates an Azure Cosmos DB Gremlin API account, datab - This script requires Azure CLI version 2.30 or later. - - You can run the script in the Bash environment in [Azure Cloud Shell](/azure/cloud-shell/quickstart). When Cloud Shell opens, make sure to select **Bash** in the environment field at the upper left of the shell window. Cloud Shell has the latest version of Azure CLI. + - You can run the script in the Bash environment in [Azure Cloud Shell](../../../../cloud-shell/quickstart.md). When Cloud Shell opens, make sure to select **Bash** in the environment field at the upper left of the shell window. Cloud Shell has the latest version of Azure CLI. 
[![Launch Cloud Shell in a new window](../../../../../includes/media/cloud-shell-try-it/hdi-launch-cloud-shell.png)](https://shell.azure.com) @@ -49,4 +49,4 @@ az group delete --name $resourceGroup ## Next steps -[Azure Cosmos DB CLI documentation](/cli/azure/cosmosdb) +[Azure Cosmos DB CLI documentation](/cli/azure/cosmosdb) \ No newline at end of file diff --git a/articles/cosmos-db/scripts/cli/gremlin/serverless.md b/articles/cosmos-db/scripts/cli/gremlin/serverless.md index 91de3935ad98..cb2ace352587 100644 --- a/articles/cosmos-db/scripts/cli/gremlin/serverless.md +++ b/articles/cosmos-db/scripts/cli/gremlin/serverless.md @@ -22,7 +22,7 @@ The script in this article creates an Azure Cosmos DB Gremlin API serverless acc - This script requires Azure CLI version 2.30 or later. - - You can run the script in the Bash environment in [Azure Cloud Shell](/azure/cloud-shell/quickstart). When Cloud Shell opens, make sure to select **Bash** in the environment field at the upper left of the shell window. Cloud Shell has the latest version of Azure CLI. + - You can run the script in the Bash environment in [Azure Cloud Shell](../../../../cloud-shell/quickstart.md). When Cloud Shell opens, make sure to select **Bash** in the environment field at the upper left of the shell window. Cloud Shell has the latest version of Azure CLI. [![Launch Cloud Shell in a new window](../../../../../includes/media/cloud-shell-try-it/hdi-launch-cloud-shell.png)](https://shell.azure.com) @@ -49,4 +49,4 @@ az group delete --name $resourceGroup ## Next steps -[Azure Cosmos DB CLI documentation](/cli/azure/cosmosdb) +[Azure Cosmos DB CLI documentation](/cli/azure/cosmosdb) \ No newline at end of file diff --git a/articles/cosmos-db/serverless.md b/articles/cosmos-db/serverless.md index ad640aaa8497..6e023548785b 100644 --- a/articles/cosmos-db/serverless.md +++ b/articles/cosmos-db/serverless.md @@ -50,7 +50,7 @@ Any container that is created in a serverless account is a serverless container. - Serverless containers can store a maximum of 50 GB of data and indexes. > [!NOTE] -> Serverless containers up to 1 TB are currently in preview with Azure Cosmos DB. To try the new feature, register the *"Azure Cosmos DB Serverless 1 TB Container Preview"* [preview feature in your Azure subscription](/azure/azure-resource-manager/management/preview-features). +> Serverless containers up to 1 TB are currently in preview with Azure Cosmos DB. To try the new feature, register the *"Azure Cosmos DB Serverless 1 TB Container Preview"* [preview feature in your Azure subscription](../azure-resource-manager/management/preview-features.md). ## Monitoring your consumption @@ -74,4 +74,4 @@ Get started with serverless with the following articles: - [Request Units in Azure Cosmos DB](request-units.md) - [Choose between provisioned throughput and serverless](throughput-serverless.md) -- [Pricing model in Azure Cosmos DB](how-pricing-works.md) +- [Pricing model in Azure Cosmos DB](how-pricing-works.md) \ No newline at end of file diff --git a/articles/cosmos-db/sql/sql-query-join.md b/articles/cosmos-db/sql/sql-query-join.md index 0bc2b234acb8..5bfa4007e450 100644 --- a/articles/cosmos-db/sql/sql-query-join.md +++ b/articles/cosmos-db/sql/sql-query-join.md @@ -215,7 +215,7 @@ The results are: ``` > [!IMPORTANT] -> This example uses mulitple JOIN expressions in a single query. There is a maximum amount of JOINs that can be used in a single query. For more information, see [SQL query limits](/azure/cosmos-db/concepts-limits#sql-query-limits). 
+> This example uses mulitple JOIN expressions in a single query. There is a maximum amount of JOINs that can be used in a single query. For more information, see [SQL query limits](../concepts-limits.md#sql-query-limits). The following extension of the preceding example performs a double join. You could view the cross product as the following pseudo-code: @@ -302,4 +302,4 @@ For example, consider the earlier query that projected the familyName, child's g - [Getting started](sql-query-getting-started.md) - [Azure Cosmos DB .NET samples](https://github.com/Azure/azure-cosmosdb-dotnet) -- [Subqueries](sql-query-subquery.md) +- [Subqueries](sql-query-subquery.md) \ No newline at end of file diff --git a/articles/cosmos-db/table/how-to-use-python.md b/articles/cosmos-db/table/how-to-use-python.md index 0202f81ca961..916182df25ae 100644 --- a/articles/cosmos-db/table/how-to-use-python.md +++ b/articles/cosmos-db/table/how-to-use-python.md @@ -22,7 +22,7 @@ This quickstart shows how to access the Azure Cosmos DB [Table API](introduction The sample application is written in [Python3.6](https://www.python.org/downloads/), though the principles apply to all Python3.6+ applications. You can use [Visual Studio Code](https://code.visualstudio.com/) as an IDE. -If you don't have an [Azure subscription](/azure/guides/developer/azure-developer-guide#understanding-accounts-subscriptions-and-billing), create a [free account](https://azure.microsoft.com/free/dotnet) before you begin. +If you don't have an [Azure subscription](../../guides/developer/azure-developer-guide.md#understanding-accounts-subscriptions-and-billing), create a [free account](https://azure.microsoft.com/free/dotnet) before you begin. ## Sample application @@ -426,4 +426,4 @@ Remove-AzResourceGroup -Name $resourceGroupName In this quickstart, you've learned how to create an Azure Cosmos DB account, create a table using the Data Explorer, and run an app. Now you can query your data using the Table API. > [!div class="nextstepaction"] -> [Import table data to the Table API](table-import.md) +> [Import table data to the Table API](table-import.md) \ No newline at end of file diff --git a/articles/cosmos-db/throughput-serverless.md b/articles/cosmos-db/throughput-serverless.md index 2e2bcafec988..34d4f361d276 100644 --- a/articles/cosmos-db/throughput-serverless.md +++ b/articles/cosmos-db/throughput-serverless.md @@ -29,7 +29,7 @@ Azure Cosmos DB is available in two different capacity modes: [provisioned throu | Performance | < 10-ms latency for point-reads and writes covered by SLA | < 10-ms latency for point-reads and < 30 ms for writes covered by SLO | | Billing model | Billing is done on a per-hour basis for the RU/s provisioned, regardless of how many RUs were consumed. | Billing is done on a per-hour basis for the number of RUs consumed by your database operations. | -1 Serverless containers up to 1 TB are currently in preview with Azure Cosmos DB. To try the new feature, register the *"Azure Cosmos DB Serverless 1 TB Container Preview"* [preview feature in your Azure subscription](/azure/azure-resource-manager/management/preview-features). +1 Serverless containers up to 1 TB are currently in preview with Azure Cosmos DB. To try the new feature, register the *"Azure Cosmos DB Serverless 1 TB Container Preview"* [preview feature in your Azure subscription](../azure-resource-manager/management/preview-features.md). 
## Estimating your expected consumption @@ -56,4 +56,4 @@ For more information, see [estimating serverless costs](plan-manage-costs.md#est - Read more about [provisioning throughput on Azure Cosmos DB](set-throughput.md) - Read more about [Azure Cosmos DB serverless](serverless.md) -- Get familiar with the concept of [Request Units](request-units.md) +- Get familiar with the concept of [Request Units](request-units.md) \ No newline at end of file diff --git a/articles/cost-management-billing/manage/create-customer-subscription.md b/articles/cost-management-billing/manage/create-customer-subscription.md new file mode 100644 index 000000000000..7500190626c0 --- /dev/null +++ b/articles/cost-management-billing/manage/create-customer-subscription.md @@ -0,0 +1,68 @@ +--- +title: Create a subscription for a partner's customer +titleSuffix: Azure Cost Management + Billing +description: Learn how a Microsoft Partner creates a subscription for a customer in the Azure portal. +author: bandersmsft +ms.reviewer: amberb +ms.service: cost-management-billing +ms.subservice: billing +ms.topic: conceptual +ms.date: 05/25/2022 +ms.author: banders +--- + +# Create a subscription for a partner's customer + +This article helps a Microsoft Partner with a [Microsoft Partner Agreement](https://www.microsoft.com/licensing/news/introducing-microsoft-partner-agreement) create a [Microsoft Customer Agreement](https://azure.microsoft.com/pricing/purchase-options/microsoft-customer-agreement/) subscription for their customer. + +To learn more about billing accounts and identify your billing account type, see [View billing accounts in Azure portal](view-all-accounts.md). + +## Permission required to create Azure subscriptions + +You need the following permissions to create customer subscriptions: + +- Global Admin and Admin Agent role in the CSP partner organization. + +For more information, see [Partner Center - Assign users roles and permissions](/partner-center/permissions-overview). The user needs to sign in to the partner tenant to create Azure subscriptions. + +## Create a subscription as a partner for a customer + +Partners with a Microsoft Partner Agreement use the following steps to create a new Microsoft Azure Plan subscription for their customers. The subscription is created under the partner’s billing account and billing profile. + +1. Sign in to the Azure portal using your Partner Center account. + Make sure you are in your Partner Center directory (tenant), not a customer’s tenant. +1. Navigate to **Cost Management + Billing**. +1. Select the Billing scope for your billing account where the customer account resides. +1. In the left menu under **Billing**, select **Customers**. + :::image type="content" source="./media/create-customer-subscription/customers-list.png" alt-text="Screenshot showing the Customers list where you see your list of customers." lightbox="./media/create-customer-subscription/customers-list.png" ::: +1. On the Customers page, select the customer. If you have only one customer, the selection is unavailable. +1. In the left menu, under **Products + services**, select **All billing subscriptions**. +1. On the Azure subscription page, select **+ Add** to create a subscription. Then select the type of subscription to add. For example, **Usage based/ Azure subscription**. + :::image type="content" source="./media/create-customer-subscription/all-billing-subscriptions-add.png" alt-text="Screenshot showing navigation to Add where you create a customer subscription." 
lightbox="./media/create-customer-subscription/all-billing-subscriptions-add.png" ::: +1. On the Basics tab, enter a subscription name. +1. Select the partner's billing account. +1. Select the partner's billing profile. +1. Select the customer that you're creating the subscription for. +1. If applicable, select a reseller. +1. Next to **Plan**, select **Microsoft Azure Plan for DevTest** if the subscription will be used for development or testing workloads. Otherwise, select **Microsoft Azure Plan**. + :::image type="content" source="./media/create-customer-subscription/create-customer-subscription-basics-tab.png" alt-text="Screenshot showing the Basics tab where you enter basic information about the customer subscription." lightbox="./media/create-customer-subscription/create-customer-subscription-basics-tab.png" ::: +1. Optionally, select the Tags tab and then enter tag pairs for **Name** and **Value**. +1. Select **Review + create**. You should see a message stating `Validation passed`. +1. Verify that the subscription information is correct, then select **Create**. You'll see a notification that the subscription is getting created. + +After the new subscription is created, the customer can see it in on the **Subscriptions** page. + +## Create an Azure subscription programmatically + +You can also create subscriptions programmatically. For more information, see [Create Azure subscriptions programmatically](programmatically-create-subscription.md). + +## Need help? Contact us. + +If you have questions or need help, [create a support request](https://go.microsoft.com/fwlink/?linkid=2083458). + +## Next steps + +- [Add or change Azure subscription administrators](add-change-subscription-administrator.md) +- [Move resources to new resource group or subscription](../../azure-resource-manager/management/move-resource-group-and-subscription.md) +- [Create management groups for resource organization and management](../../governance/management-groups/create-management-group-portal.md) +- [Cancel your subscription for Azure](cancel-azure-subscription.md) \ No newline at end of file diff --git a/articles/cost-management-billing/manage/create-enterprise-subscription.md b/articles/cost-management-billing/manage/create-enterprise-subscription.md new file mode 100644 index 000000000000..24166635fcdd --- /dev/null +++ b/articles/cost-management-billing/manage/create-enterprise-subscription.md @@ -0,0 +1,67 @@ +--- +title: Create an Enterprise Agreement subscription +titleSuffix: Azure Cost Management + Billing +description: Learn how to add a new Enterprise Agreement subscription in the Azure portal. See information about billing account forms and view other available resources. +author: bandersmsft +ms.reviewer: amberb +ms.service: cost-management-billing +ms.subservice: billing +ms.topic: conceptual +ms.date: 05/25/2022 +ms.author: banders +--- + +# Create an Enterprise Agreement subscription + +This article helps you create an [Enterprise Agreement (EA)](https://azure.microsoft.com/pricing/enterprise-agreement/) subscription for yourself or for someone else in your current Azure Active Directory (Azure AD) directory/tenant. You may want another subscription to avoid hitting subscription quota limits, to create separate environments for security, or to isolate data for compliance reasons. + +If you want to create subscriptions for Microsoft Customer Agreements, see [Create a Microsoft Customer Agreement subscription](create-subscription.md). 
If you're a Microsoft Partner and you want to create a subscription for a customer, see [Create a subscription for a partner's customer](create-customer-subscription.md). Or, if you have a Microsoft Online Service Program (MOSP) billing account, also called pay-as-you-go, you can create subscriptions starting in the [Azure portal](https://portal.azure.com/#blade/Microsoft_Azure_Billing/SubscriptionsBlade) and then you complete the process at https://signup.azure.com/. + +To learn more about billing accounts and identify your billing account type, see [View billing accounts in Azure portal](view-all-accounts.md). + +## Permission required to create Azure subscriptions + +You need the following permissions to create subscriptions for an EA: + +- Account Owner role on the Enterprise Agreement enrollment. For more information, see [Understand Azure Enterprise Agreement administrative roles in Azure](understand-ea-roles.md). + +## Create an EA subscription + +Use the following information to create an EA subscription. + +1. Sign in to the [Azure portal](https://portal.azure.com). +1. Navigate to **Subscriptions** and then select **Add**. + :::image type="content" source="./media/create-enterprise-subscription/subscription-add.png" alt-text="Screenshot showing the Subscription page where you Add a subscription." lightbox="./media/create-enterprise-subscription/subscription-add.png" ::: +1. On the Create a subscription page, on the **Basics** tab, type a **Subscription name**. +1. Select the **Billing account** where the new subscription will get created. +1. Select the **Enrollment account** where the subscription will get created. +1. For **Offer type**, select **Enterprise Dev/Test** if the subscription will be used for development or testing workloads. Otherwise, select **Microsoft Azure Enterprise**. + :::image type="content" source="./media/create-enterprise-subscription/create-subscription-basics-tab-enterprise-agreement.png" alt-text="Screenshot showing the Basics tab where you enter basic information about the enterprise subscription." lightbox="./media/create-enterprise-subscription/create-subscription-basics-tab-enterprise-agreement.png" ::: +1. Select the **Advanced** tab. +1. Select your **Subscription directory**. It's the Azure Active Directory (Azure AD) where the new subscription will get created. +1. Select a **Management group**. It's the Azure AD management group that the new subscription is associated with. You can only select management groups in the current directory. +1. Select one or more **Subscription owners**. You can select only users or service principals in the selected subscription directory. You can't select guest directory users. If you select a service principal, enter its App ID. + :::image type="content" source="./media/create-enterprise-subscription/create-subscription-advanced-tab.png" alt-text="Screenshot showing the Advanced tab where you specify the directory, management group, and owner for the EA subscription. " lightbox="./media/create-enterprise-subscription/create-subscription-advanced-tab.png" ::: +1. Select the **Tags** tab. +1. Enter tag pairs for **Name** and **Value**. + :::image type="content" source="./media/create-enterprise-subscription/create-subscription-tags-tab.png" alt-text="Screenshot showing the tags tab where you enter tag and value pairs." lightbox="./media/create-enterprise-subscription/create-subscription-tags-tab.png" ::: +1. Select **Review + create**. You should see a message stating `Validation passed`. +1. 
Verify that the subscription information is correct, then select **Create**. You'll see a notification that the subscription is getting created. + +After the new subscription is created, the account owner can see it on the **Subscriptions** page. + + +## Create an Azure subscription programmatically + +You can also create subscriptions programmatically. For more information, see [Create Azure subscriptions programmatically](programmatically-create-subscription.md). + +## Need help? Contact us. + +If you have questions or need help, [create a support request](https://go.microsoft.com/fwlink/?linkid=2083458). + +## Next steps + +- [Add or change Azure subscription administrators](add-change-subscription-administrator.md) +- [Move resources to new resource group or subscription](../../azure-resource-manager/management/move-resource-group-and-subscription.md) +- [Create management groups for resource organization and management](../../governance/management-groups/create-management-group-portal.md) +- [Cancel your subscription for Azure](cancel-azure-subscription.md) \ No newline at end of file diff --git a/articles/cost-management-billing/manage/create-subscription-request.md b/articles/cost-management-billing/manage/create-subscription-request.md new file mode 100644 index 000000000000..28bc914ee25e --- /dev/null +++ b/articles/cost-management-billing/manage/create-subscription-request.md @@ -0,0 +1,93 @@ +--- +title: Create a Microsoft Customer Agreement subscription request +titleSuffix: Azure Cost Management + Billing +description: Learn how to create an Azure subscription request in the Azure portal. See information about billing account forms and view other available resources. +author: bandersmsft +ms.reviewer: amberb +ms.service: cost-management-billing +ms.subservice: billing +ms.topic: conceptual +ms.date: 05/25/2022 +ms.author: banders +--- + +# Create a Microsoft Customer Agreement subscription request + +This article helps you create a [Microsoft Customer Agreement](https://azure.microsoft.com/pricing/purchase-options/microsoft-customer-agreement/) subscription for someone else that's in a different Azure Active Directory (Azure AD) directory/tenant. After the request is created, the recipient accepts the subscription request. You may want another subscription to avoid hitting subscription quota limits, to create separate environments for security, or to isolate data for compliance reasons. + +If you instead want to create a subscription for yourself or for someone else in your current Azure Active Directory (Azure AD) directory/tenant, see [Create a Microsoft Customer Agreement subscription](create-subscription.md). If you want to create subscriptions for Enterprise Agreements, see [Create an EA subscription](create-enterprise-subscription.md). If you're a Microsoft Partner and you want to create a subscription for a customer, see [Create a subscription for a partner's customer](create-customer-subscription.md). Or, if you have a Microsoft Online Service Program (MOSP) billing account, also called pay-as-you-go, you can create subscriptions starting in the [Azure portal](https://portal.azure.com/#blade/Microsoft_Azure_Billing/SubscriptionsBlade) and then you complete the process at https://signup.azure.com/. + +To learn more about billing accounts and identify your billing account type, see [View billing accounts in Azure portal](view-all-accounts.md). 
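As an optional aid, you can also identify the billing account, billing profile, and invoice section you'll use from PowerShell before you start. This is a minimal sketch, assuming the Az.Billing module and an account with at least read access to the billing account; values in angle brackets are placeholders.

```powershell
# Minimal sketch: discover the billing scope details used for an MCA subscription request.
Connect-AzAccount

# Billing accounts you can access; note the billing account name (ID).
Get-AzBillingAccount

# Billing profiles and invoice sections under a specific billing account.
Get-AzBillingProfile -BillingAccountName "<billing-account-name>"
Get-AzInvoiceSection -BillingAccountName "<billing-account-name>" -BillingProfileName "<billing-profile-name>"
```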
+ +## Permission required to create Azure subscriptions + +You need one of the following permissions to create a Microsoft Customer Agreement (MCA) subscription request: + +- Owner or contributor role on the invoice section, billing profile or billing account. +- Azure subscription creator role on the invoice section. + +For more information, see [Subscription billing roles and tasks](understand-mca-roles.md#subscription-billing-roles-and-tasks). + +## Create a subscription request + +The subscription creator uses the following procedure to create a subscription request for a person in a different Azure Active Directory (Azure AD). After creation, the request is sent to the subscription acceptor (recipient) by email. + +A link to the subscription request is also created. The creator can manually share the link with the acceptor. + +1. Sign in to the [Azure portal](https://portal.azure.com). +1. Navigate to **Subscriptions** and then select **Add**. + :::image type="content" source="./media/create-subscription-request/subscription-add.png" alt-text="Screenshot showing the Subscription page where you Add a subscription." lightbox="./media/create-subscription-request/subscription-add.png" ::: +1. On the Create a subscription page, on the **Basics** tab, type a **Subscription name**. +1. Select the **Billing account** where the new subscription will get created. +1. Select the **Billing profile** where the subscription will get created. +1. Select the **Invoice section** where the subscription will get created. +1. Next to **Plan**, select **Microsoft Azure Plan for DevTest** if the subscription will be used for development or testing workloads. Otherwise, select **Microsoft Azure Plan**. + :::image type="content" source="./media/create-subscription-request/create-subscription-basics-tab.png" alt-text="Screenshot showing the Basics tab where you enter basic information about the subscription." lightbox="./media/create-subscription-request/create-subscription-basics-tab.png" ::: +1. Select the **Advanced** tab. +1. Select your **Subscription directory**. It's the Azure Active Directory (Azure AD) where the new subscription will get created. +1. The **Management group** option is unavailable because you can only select management groups in the current directory. +1. Select one or more **Subscription owners**. You can select only users or service principals in the selected subscription directory. You can't select guest directory users. If you select a service principal, enter its App ID. + :::image type="content" source="./media/create-subscription-request/create-subscription-advanced-tab-external.png" alt-text="Screenshot showing the Advanced tab where you specify the directory, management group, and owner. " lightbox="./media/create-subscription-request/create-subscription-advanced-tab-external.png" ::: +1. Select the **Tags** tab. +1. Enter tag pairs for **Name** and **Value**. + :::image type="content" source="./media/create-subscription-request/create-subscription-tags-tab.png" alt-text="Screenshot showing the tags tab where you enter tag and value pairs." lightbox="./media/create-subscription-request/create-subscription-tags-tab.png" ::: +1. Select **Review + create**. You should see a message stating `The subscription will be created once the subscription owner accepts this request in the target directory.` +1. Verify that the subscription information is correct, then select **Request**. You'll see a notification that the request is getting created and sent to the acceptor. 
+ +After the subscription request is sent, the acceptor receives an email with subscription acceptance information and a link where they can accept the new subscription. + +The subscription creator can also view the subscription request details from **Subscriptions** > **View Requests**. There they can open the subscription request to view its details and copy the **Accept ownership URL**. Then they can manually send the link to the subscription acceptor. + +:::image type="content" source="./media/create-subscription-request/view-requests-accept-url.png" alt-text="Screenshot showing the Accept ownership URL that you can copy to manually send to the acceptor." lightbox="./media/create-subscription-request/view-requests-accept-url.png" ::: + +## Accept subscription ownership + +The subscription acceptor receives an email inviting them to accept subscription ownership. Select **Accept ownership** to get started. + +:::image type="content" source="./media/create-subscription-request/accept-subscription-ownership-email.png" alt-text="Screenshot showing the email with the Accept Ownership link." lightbox="./media/create-subscription-request/accept-subscription-ownership-email.png" ::: + +Or, the subscription creator might have manually sent the acceptor an **Accept ownership URL** link. The acceptor uses the following steps to review and accept subscription ownership. + +1. In either case above, select the link to open the Accept subscription ownership page in the Azure portal. +1. On the Basics tab, you can optionally change the subscription name. +1. Select the Advanced tab where you can optionally change the Azure AD management group that the new subscription is associated with. You can only select management groups in the current directory. +1. Select the Tags tab to optionally enter tag pairs for **Name** and **Value**. +1. Select the Review + accept tab. You should see a message stating `Validation passed. Click on the Accept button below to initiate subscription creation`. +1. Select **Accept**. You'll see a status message stating that the subscription is being created. Then you'll see another status message stating that the subscription was successfully created. The acceptor becomes the subscription owner. + +After the new subscription is created, the acceptor can see it on the **Subscriptions** page. + +## Create an Azure subscription programmatically + +You can also create subscriptions programmatically. For more information, see [Create Azure subscriptions programmatically](programmatically-create-subscription.md). A minimal PowerShell sketch appears later in this article. + +## Need help? Contact us. + +If you have questions or need help, [create a support request](https://go.microsoft.com/fwlink/?linkid=2083458). 
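As referenced in the programmatic option above, the following is a minimal sketch only, not a complete procedure. It assumes the Az.Subscription PowerShell module, an MCA billing scope that you have subscription-creation rights on, and placeholder IDs in angle brackets.

```powershell
# Minimal sketch: create an MCA subscription by creating a subscription alias.
# All IDs below are placeholders.
$billingScope = "/providers/Microsoft.Billing/billingAccounts/<billing-account-id>/billingProfiles/<billing-profile-id>/invoiceSections/<invoice-section-id>"

New-AzSubscriptionAlias `
    -AliasName "sample-subscription-alias" `
    -SubscriptionName "My new subscription" `
    -BillingScope $billingScope `
    -Workload "Production"    # use "DevTest" for dev/test workloads
```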
+ +## Next steps + +- [Add or change Azure subscription administrators](add-change-subscription-administrator.md) +- [Move resources to new resource group or subscription](../../azure-resource-manager/management/move-resource-group-and-subscription.md) +- [Create management groups for resource organization and management](../../governance/management-groups/create-management-group-portal.md) +- [Cancel your subscription for Azure](cancel-azure-subscription.md) \ No newline at end of file diff --git a/articles/cost-management-billing/manage/create-subscription.md b/articles/cost-management-billing/manage/create-subscription.md index 0612a15123be..02838141b73f 100644 --- a/articles/cost-management-billing/manage/create-subscription.md +++ b/articles/cost-management-billing/manage/create-subscription.md @@ -1,102 +1,71 @@ --- -title: Create an additional Azure subscription -description: Learn how to add a new Azure subscription in the Azure portal. See information about billing account forms and view additional available resources. +title: Create a Microsoft Customer Agreement subscription +titleSuffix: Azure Cost Management + Billing +description: Learn how to add a new Microsoft Customer Agreement subscription in the Azure portal. See information about billing account forms and view other available resources. author: bandersmsft ms.reviewer: amberb ms.service: cost-management-billing ms.subservice: billing ms.topic: conceptual -ms.date: 11/11/2021 +ms.date: 05/25/2022 ms.author: banders --- -# Create an additional Azure subscription +# Create a Microsoft Customer Agreement subscription -You can create an additional subscription for your [Enterprise Agreement (EA)](https://azure.microsoft.com/pricing/enterprise-agreement/), [Microsoft Customer Agreement](https://azure.microsoft.com/pricing/purchase-options/microsoft-customer-agreement/) or [Microsoft Partner Agreement](https://www.microsoft.com/licensing/news/introducing-microsoft-partner-agreement) billing account in the Azure portal. You may want an additional subscription to avoid hitting subscription limits, to create separate environments for security, or to isolate data for compliance reasons. +This article helps you create a [Microsoft Customer Agreement](https://azure.microsoft.com/pricing/purchase-options/microsoft-customer-agreement/) subscription for yourself or for someone else in your current Azure Active Directory (Azure AD) directory/tenant. You may want another subscription to avoid hitting subscription quota limits, to create separate environments for security, or to isolate data for compliance reasons. -If you have a Microsoft Online Service Program (MOSP) billing account, you can create additional subscriptions in the [Azure portal](https://portal.azure.com/#blade/Microsoft_Azure_Billing/SubscriptionsBlade). +If you want to create a Microsoft Customer Agreement subscription in a different Azure AD tenant, see [Create an MCA subscription request](create-subscription-request.md). -To learn more about billing accounts and identify the type of your billing account, see [View billing accounts in Azure portal](view-all-accounts.md). +If you want to create subscriptions for Enterprise Agreements, see [Create an EA subscription](create-enterprise-subscription.md). If you're a Microsoft Partner and you want to create a subscription for a customer, see [Create a subscription for a partner's customer](create-customer-subscription.md). 
Or, if you have a Microsoft Online Service Program (MOSP) billing account, also called pay-as-you-go, you can create subscriptions starting in the [Azure portal](https://portal.azure.com/#blade/Microsoft_Azure_Billing/SubscriptionsBlade) and then you complete the process at https://signup.azure.com/. -## Permission required to create Azure subscriptions - -You need the following permissions to create subscriptions: - -|Billing account |Permission | -|---------|---------| -|Enterprise Agreement (EA) | Account Owner role on the Enterprise Agreement enrollment. For more information, see [Understand Azure Enterprise Agreement administrative roles in Azure](understand-ea-roles.md). | -|Microsoft Customer Agreement (MCA) | Owner or contributor role on the invoice section, billing profile or billing account. Or Azure subscription creator role on the invoice section. For more information, see [Subscription billing roles and task](understand-mca-roles.md#subscription-billing-roles-and-tasks). | -|Microsoft Partner Agreement (MPA) | Global Admin and Admin Agent role in the CSP partner organization. To learn more, see [Partner Center - Assign users roles and permissions](/partner-center/permissions-overview). The user needs to sign to partner tenant to create Azure subscriptions. | - -## Create a subscription in the Azure portal - -1. Sign in to the [Azure portal](https://portal.azure.com). -1. Search for **Subscriptions**. - - ![Screenshot that shows search in portal for subscription](./media/create-subscription/billing-search-subscription-portal.png) - -1. Select **Add**. - - ![Screenshot that shows the Add button in Subscriptions view](./media/create-subscription/subscription-add.png) - -1. If you have access to multiple billing accounts, select the billing account for which you want to create the subscription. - -1. Fill the form and select **Create**. The tables below list the fields on the form for each type of billing account. +To learn more about billing accounts and identify your billing account type, see [View billing accounts in Azure portal](view-all-accounts.md). -**Enterprise Agreement** - -|Field |Definition | -|---------|---------| -|Name | The display name that helps you easily identify the subscription in the Azure portal. | -|Offer | Select EA Dev/Test, if you plan to use this subscription for development or testing workloads else use Microsoft Azure Enterprise. DevTest offer must be enabled for your enrollment account to create EA Dev/Test subscriptions.| - -**Microsoft Customer Agreement** - -|Field |Definition | -|---------|---------| -|Billing profile | The charges for your subscription will be billed to the billing profile that you select. If you have access to only one billing profile, the selection will be greyed out. | -|Invoice section | The charges for your subscription will appear on this section of the billing profile's invoice. If you have access to only one invoice section, the selection will be greyed out. | -|Plan | Select Microsoft Azure Plan for DevTest, if you plan to use this subscription for development or testing workloads else use Microsoft Azure Plan. If only one plan is enabled for the billing profile, the selection will be greyed out. | -|Name | The display name that helps you easily identify the subscription in the Azure portal. | - -**Microsoft Partner Agreement** +## Permission required to create Azure subscriptions -|Field |Definition | -|---------|---------| -|Customer | The subscription is created for the customer that you select. 
If you have only one customer, the selection will be greyed out. | -|Reseller | The reseller that will provide services to the customer. This is an optional field, which is only applicable to Indirect providers in the CSP two-tier model. | -|Name | The display name that helps you easily identify the subscription in the Azure portal. | +You need the following permissions to create subscriptions for a Microsoft Customer Agreement (MCA): -## Create a subscription as a partner for a customer +- Owner or contributor role on the invoice section, billing profile or billing account. Or Azure subscription creator role on the invoice section. -Partners with a Microsoft Partner Agreement use the following steps to create a new Microsoft Azure Plan subscription for their customers. The subscription is created under the partner’s billing account and billing profile. +For more information, see [Subscription billing roles and task](understand-mca-roles.md#subscription-billing-roles-and-tasks). -1. Sign in to the Azure portal using your Partner Center account. -Make sure you are in your Partner Center directory (tenant), not a customer’s tenant. -1. Navigate to **Cost Management + Billing**. -1. Select the Billing scope for the billing account where the customer account resides. -1. In the left menu under **Billing**, select **Customers**. -1. On the Customers page, select the customer. -1. In the left menu, under **Products + services**, select **Azure Subscriptions**. -1. On the Azure subscription page, select **+ Add** to create a subscription. -1. Enter details about the subscription and when complete, select **Review + create**. +## Create a subscription +Use the following procedure to create a subscription for yourself or for someone in the current Azure Active Directory. When you're done, the new subscription is created immediately. -## Create an additional Azure subscription programmatically +1. Sign in to the [Azure portal](https://portal.azure.com). +1. Navigate to **Subscriptions** and then select **Add**. + :::image type="content" source="./media/create-subscription/subscription-add.png" alt-text="Screenshot showing the Subscription page where you Add a subscription." lightbox="./media/create-subscription/subscription-add.png" ::: +1. On the Create a subscription page, on the **Basics** tab, type a **Subscription name**. +1. Select the **Billing account** where the new subscription will get created. +1. Select the **Billing profile** where the subscription will get created. +1. Select the **Invoice section** where the subscription will get created. +1. Next to **Plan**, select **Microsoft Azure Plan for DevTest** if the subscription will be used for development or testing workloads. Otherwise, select **Microsoft Azure Plan**. + :::image type="content" source="./media/create-subscription/create-subscription-basics-tab.png" alt-text="Screenshot showing the Basics tab where you enter basic information about the subscription." lightbox="./media/create-subscription/create-subscription-basics-tab.png" ::: +1. Select the **Advanced** tab. +1. Select your **Subscription directory**. It's the Azure Active Directory (Azure AD) where the new subscription will get created. +1. Select a **Management group**. It's the Azure AD management group that the new subscription is associated with. You can only select management groups in the current directory. +1. Select more or more **Subscription owners**. You can select only users or service principals in the selected subscription directory. 
You can't select guest directory users. If you select a service principal, enter its App ID. + :::image type="content" source="./media/create-subscription/create-subscription-advanced-tab.png" alt-text="Screenshot showing the Advanced tab where you can specify the directory, management group, and owner. " lightbox="./media/create-subscription/create-subscription-advanced-tab.png" ::: +1. Select the **Tags** tab. +1. Enter tag pairs for **Name** and **Value**. + :::image type="content" source="./media/create-subscription/create-subscription-tags-tab.png" alt-text="Screenshot showing the tags tab where you enter tag and value pairs." lightbox="./media/create-subscription/create-subscription-tags-tab.png" ::: +1. Select **Review + create**. You should see a message stating `Validation passed`. +1. Verify that the subscription information is correct, then select **Create**. You'll see a notification that the subscription is getting created. + +After the new subscription is created, the owner of the subscription can see it in on the **Subscriptions** page. + +## Create an Azure subscription programmatically + +You can also create subscriptions programmatically. For more information, see [Create Azure subscriptions programmatically](programmatically-create-subscription.md). -You can also create additional subscriptions programmatically. For more information, see: +## Need help? Contact us. -- [Create EA subscriptions programmatically with latest API](programmatically-create-subscription-enterprise-agreement.md) -- [Create MCA subscriptions programmatically with latest API](programmatically-create-subscription-microsoft-customer-agreement.md) -- [Create MPA subscriptions programmatically with latest API](Programmatically-create-subscription-microsoft-customer-agreement.md) +If you have questions or need help, [create a support request](https://go.microsoft.com/fwlink/?linkid=2083458). ## Next steps - [Add or change Azure subscription administrators](add-change-subscription-administrator.md) - [Move resources to new resource group or subscription](../../azure-resource-manager/management/move-resource-group-and-subscription.md) - [Create management groups for resource organization and management](../../governance/management-groups/create-management-group-portal.md) -- [Cancel your subscription for Azure](cancel-azure-subscription.md) - -## Need help? Contact us. - -If you have questions or need help, [create a support request](https://go.microsoft.com/fwlink/?linkid=2083458). 
\ No newline at end of file +- [Cancel your Azure subscription](cancel-azure-subscription.md) \ No newline at end of file diff --git a/articles/cost-management-billing/manage/media/create-customer-subscription/all-billing-subscriptions-add.png b/articles/cost-management-billing/manage/media/create-customer-subscription/all-billing-subscriptions-add.png new file mode 100644 index 000000000000..b73b7e98c95b Binary files /dev/null and b/articles/cost-management-billing/manage/media/create-customer-subscription/all-billing-subscriptions-add.png differ diff --git a/articles/cost-management-billing/manage/media/create-customer-subscription/create-customer-subscription-basics-tab.png b/articles/cost-management-billing/manage/media/create-customer-subscription/create-customer-subscription-basics-tab.png new file mode 100644 index 000000000000..cd57daccf917 Binary files /dev/null and b/articles/cost-management-billing/manage/media/create-customer-subscription/create-customer-subscription-basics-tab.png differ diff --git a/articles/cost-management-billing/manage/media/create-customer-subscription/customers-list.png b/articles/cost-management-billing/manage/media/create-customer-subscription/customers-list.png new file mode 100644 index 000000000000..b968faeffbdf Binary files /dev/null and b/articles/cost-management-billing/manage/media/create-customer-subscription/customers-list.png differ diff --git a/articles/cost-management-billing/manage/media/create-enterprise-subscription/create-subscription-advanced-tab.png b/articles/cost-management-billing/manage/media/create-enterprise-subscription/create-subscription-advanced-tab.png new file mode 100644 index 000000000000..85aff0595835 Binary files /dev/null and b/articles/cost-management-billing/manage/media/create-enterprise-subscription/create-subscription-advanced-tab.png differ diff --git a/articles/cost-management-billing/manage/media/create-enterprise-subscription/create-subscription-basics-tab-enterprise-agreement.png b/articles/cost-management-billing/manage/media/create-enterprise-subscription/create-subscription-basics-tab-enterprise-agreement.png new file mode 100644 index 000000000000..23f8ec0d7280 Binary files /dev/null and b/articles/cost-management-billing/manage/media/create-enterprise-subscription/create-subscription-basics-tab-enterprise-agreement.png differ diff --git a/articles/cost-management-billing/manage/media/create-enterprise-subscription/create-subscription-tags-tab.png b/articles/cost-management-billing/manage/media/create-enterprise-subscription/create-subscription-tags-tab.png new file mode 100644 index 000000000000..4967c06ea19e Binary files /dev/null and b/articles/cost-management-billing/manage/media/create-enterprise-subscription/create-subscription-tags-tab.png differ diff --git a/articles/cost-management-billing/manage/media/create-enterprise-subscription/subscription-add.png b/articles/cost-management-billing/manage/media/create-enterprise-subscription/subscription-add.png new file mode 100644 index 000000000000..109b8445b07e Binary files /dev/null and b/articles/cost-management-billing/manage/media/create-enterprise-subscription/subscription-add.png differ diff --git a/articles/cost-management-billing/manage/media/create-subscription-request/accept-subscription-ownership-email.png b/articles/cost-management-billing/manage/media/create-subscription-request/accept-subscription-ownership-email.png new file mode 100644 index 000000000000..95d91fc48eb7 Binary files /dev/null and 
b/articles/cost-management-billing/manage/media/create-subscription-request/accept-subscription-ownership-email.png differ diff --git a/articles/cost-management-billing/manage/media/create-subscription-request/create-subscription-advanced-tab-external.png b/articles/cost-management-billing/manage/media/create-subscription-request/create-subscription-advanced-tab-external.png new file mode 100644 index 000000000000..ee04fe39962e Binary files /dev/null and b/articles/cost-management-billing/manage/media/create-subscription-request/create-subscription-advanced-tab-external.png differ diff --git a/articles/cost-management-billing/manage/media/create-subscription-request/create-subscription-basics-tab.png b/articles/cost-management-billing/manage/media/create-subscription-request/create-subscription-basics-tab.png new file mode 100644 index 000000000000..fd2928db34ea Binary files /dev/null and b/articles/cost-management-billing/manage/media/create-subscription-request/create-subscription-basics-tab.png differ diff --git a/articles/cost-management-billing/manage/media/create-subscription-request/create-subscription-tags-tab.png b/articles/cost-management-billing/manage/media/create-subscription-request/create-subscription-tags-tab.png new file mode 100644 index 000000000000..4967c06ea19e Binary files /dev/null and b/articles/cost-management-billing/manage/media/create-subscription-request/create-subscription-tags-tab.png differ diff --git a/articles/cost-management-billing/manage/media/create-subscription-request/subscription-add.png b/articles/cost-management-billing/manage/media/create-subscription-request/subscription-add.png new file mode 100644 index 000000000000..109b8445b07e Binary files /dev/null and b/articles/cost-management-billing/manage/media/create-subscription-request/subscription-add.png differ diff --git a/articles/cost-management-billing/manage/media/create-subscription-request/view-requests-accept-url.png b/articles/cost-management-billing/manage/media/create-subscription-request/view-requests-accept-url.png new file mode 100644 index 000000000000..21a2868075fa Binary files /dev/null and b/articles/cost-management-billing/manage/media/create-subscription-request/view-requests-accept-url.png differ diff --git a/articles/cost-management-billing/manage/media/create-subscription/billing-search-subscription-portal.png b/articles/cost-management-billing/manage/media/create-subscription/billing-search-subscription-portal.png deleted file mode 100644 index d3560f3fbee2..000000000000 Binary files a/articles/cost-management-billing/manage/media/create-subscription/billing-search-subscription-portal.png and /dev/null differ diff --git a/articles/cost-management-billing/manage/media/create-subscription/create-subscription-advanced-tab.png b/articles/cost-management-billing/manage/media/create-subscription/create-subscription-advanced-tab.png new file mode 100644 index 000000000000..85aff0595835 Binary files /dev/null and b/articles/cost-management-billing/manage/media/create-subscription/create-subscription-advanced-tab.png differ diff --git a/articles/cost-management-billing/manage/media/create-subscription/create-subscription-basics-tab.png b/articles/cost-management-billing/manage/media/create-subscription/create-subscription-basics-tab.png new file mode 100644 index 000000000000..fd2928db34ea Binary files /dev/null and b/articles/cost-management-billing/manage/media/create-subscription/create-subscription-basics-tab.png differ diff --git 
a/articles/cost-management-billing/manage/media/create-subscription/create-subscription-tags-tab.png b/articles/cost-management-billing/manage/media/create-subscription/create-subscription-tags-tab.png new file mode 100644 index 000000000000..4967c06ea19e Binary files /dev/null and b/articles/cost-management-billing/manage/media/create-subscription/create-subscription-tags-tab.png differ diff --git a/articles/cost-management-billing/manage/media/create-subscription/subscription-add.png b/articles/cost-management-billing/manage/media/create-subscription/subscription-add.png index 27658ca946c4..109b8445b07e 100644 Binary files a/articles/cost-management-billing/manage/media/create-subscription/subscription-add.png and b/articles/cost-management-billing/manage/media/create-subscription/subscription-add.png differ diff --git a/articles/cost-management-billing/toc.yml b/articles/cost-management-billing/toc.yml index b825d8021179..b81b74f0ba39 100644 --- a/articles/cost-management-billing/toc.yml +++ b/articles/cost-management-billing/toc.yml @@ -195,8 +195,14 @@ href: manage/azurestudents-subscription-disabled.md - name: Set up and configure AWS integration href: costs/aws-integration-set-up-configure.md - - name: Create additional subscriptions + - name: Create an MCA subscription href: manage/create-subscription.md + - name: Create an MCA subscription request + href: manage/create-subscription-request.md + - name: Create an EA subscription + href: manage/create-enterprise-subscription.md + - name: Create a subscription for a partner's customer + href: manage/create-customer-subscription.md - name: Grant access to create EA subscriptions href: manage/grant-access-to-create-subscription.md - name: Change administrator diff --git a/articles/cost-management-billing/understand/mca-overview.md b/articles/cost-management-billing/understand/mca-overview.md index cfa531879bdb..a50560cd8a16 100644 --- a/articles/cost-management-billing/understand/mca-overview.md +++ b/articles/cost-management-billing/understand/mca-overview.md @@ -6,7 +6,7 @@ ms.reviewer: amberbhargava ms.service: cost-management-billing ms.subservice: billing ms.topic: conceptual -ms.date: 09/15/2021 +ms.date: 05/26/2022 ms.author: banders --- @@ -61,7 +61,7 @@ Azure plans determine the pricing and service level agreements for Azure subscri | Plan | Definition | |------------------|-------------| |Microsoft Azure Plan | Allow users to create subscriptions that can run any workloads. | -|Microsoft Azure Plan for Dev/Test | Allow Visual Studio subscribers to create subscriptions that are restricted for development or testing workloads. These subscriptions get benefits such as lower rates and access to exclusive virtual machine images in the Azure portal. | +|Microsoft Azure Plan for Dev/Test | Allow Visual Studio subscribers to create subscriptions that are restricted for development or testing workloads. These subscriptions get benefits such as lower rates and access to exclusive virtual machine images in the Azure portal. Azure Plan for DevTest is only available for Microsoft Customer Agreement customers who purchase through a Microsoft Sales representative. 
| ## Invoice sections diff --git a/articles/cost-management-billing/understand/pay-bill.md b/articles/cost-management-billing/understand/pay-bill.md index 06fc4f803f29..6ebad4130aac 100644 --- a/articles/cost-management-billing/understand/pay-bill.md +++ b/articles/cost-management-billing/understand/pay-bill.md @@ -34,6 +34,10 @@ On 1 October 2021, automatic payments in India may block some credit card transa [Learn more about the Reserve Bank of India regulation for recurring payments](https://www.rbi.org.in/Scripts/NotificationUser.aspx?Id=11668&Mode=0) +On 1 July 2022, Microsoft and other online merchants will no longer be storing credit card information. To comply with this regulation Microsoft will be removing all stored card details from Microsoft Azure. To avoid service interruption, you will need to add a payment method and make a one-time payment for all invoices. + +[Learn about the Reserve Bank of India regulation for card storage](https://www.rbi.org.in/Scripts/NotificationUser.aspx?Id=12211) + ## Pay by default payment method The default payment method of your billing profile can either be a credit card, debit card, or check wire transfer. diff --git a/articles/data-factory/TOC.yml b/articles/data-factory/TOC.yml index 5bf67534e97f..b6b6f280808f 100644 --- a/articles/data-factory/TOC.yml +++ b/articles/data-factory/TOC.yml @@ -347,6 +347,8 @@ items: href: connector-amazon-simple-storage-service.md - name: Amazon S3 Compatible Storage href: connector-amazon-s3-compatible-storage.md + - name: Asana + href: connector-asana.md - name: Avro format href: format-avro.md - name: Azure Blob Storage @@ -820,6 +822,10 @@ items: href: data-factory-private-link.md - name: Azure security baseline href: /security/benchmark/azure/baselines/data-factory-security-baseline?toc=/azure/data-factory/TOC.json + - name: Settings + items: + - name: Manage Azure Data Factory settings and preferences + href: how-to-manage-settings.md - name: Monitor and manage items: - name: Monitor visually diff --git a/articles/data-factory/connector-asana.md b/articles/data-factory/connector-asana.md new file mode 100644 index 000000000000..5ac33af17fd6 --- /dev/null +++ b/articles/data-factory/connector-asana.md @@ -0,0 +1,112 @@ +--- +title: Transform data in Asana (Preview) +titleSuffix: Azure Data Factory & Azure Synapse +description: Learn how to transform data in Asana (Preview) by using Data Factory or Azure Synapse Analytics. +ms.author: jianleishen +author: jianleishen +ms.service: data-factory +ms.subservice: data-movement +ms.topic: conceptual +ms.custom: synapse +ms.date: 05/20/2022 +--- + +# Transform data in Asana (Preview) using Azure Data Factory or Synapse Analytics + +[!INCLUDE[appliesto-adf-asa-md](includes/appliesto-adf-asa-md.md)] + +This article outlines how to use Data Flow to transform data in Asana (Preview). To learn more, read the introductory article for [Azure Data Factory](introduction.md) or [Azure Synapse Analytics](../synapse-analytics/overview-what-is.md). + +> [!IMPORTANT] +> This connector is currently in preview. You can try it out and give us feedback. If you want to take a dependency on preview connectors in your solution, please contact [Azure support](https://azure.microsoft.com/support/). + +## Supported capabilities + +This Asana connector is supported for the following activities: + +- [Mapping data flow](concepts-data-flow-overview.md) + +## Create an Asana linked service using UI + +Use the following steps to create an Asana linked service in the Azure portal UI. + +1. 
Browse to the Manage tab in your Azure Data Factory or Synapse workspace and select Linked Services, then select New: + + # [Azure Data Factory](#tab/data-factory) + + :::image type="content" source="media/doc-common-process/new-linked-service.png" alt-text="Screenshot of creating a new linked service with Azure Data Factory U I."::: + + # [Azure Synapse](#tab/synapse-analytics) + + :::image type="content" source="media/doc-common-process/new-linked-service-synapse.png" alt-text="Screenshot of creating a new linked service with Azure Synapse U I."::: + +2. Search for Asana (Preview) and select the Asana (Preview) connector. + + :::image type="content" source="media/connector-asana/asana-connector.png" alt-text="Screenshot showing selecting Asana connector."::: + +3. Configure the service details, test the connection, and create the new linked service. + + :::image type="content" source="media/connector-asana/configure-asana-linked-service.png" alt-text="Screenshot of configuration for Asana linked service."::: + +## Connector configuration details + +The following sections provide information about properties that are used to define Data Factory and Synapse pipeline entities specific to Asana. + +## Linked service properties + +The following properties are supported for the Asana linked service: + +| Property | Description | Required | +|:--- |:--- |:--- | +| type | The type property must be set to **Asana**. |Yes | +| apiToken | Specify an API token for the Asana. Mark this field as **SecureString** to store it securely. Or, you can [reference a secret stored in Azure Key Vault](store-credentials-in-key-vault.md). |Yes | + +**Example:** + +```json +{ + "name": "AsanaLinkedService", + "properties": { + "type": "Asana", + "typeProperties": { + "apiToken": { + "type": "SecureString", + "value": "" + } + } + } +} +``` + +## Mapping data flow properties + +When transforming data in mapping data flow, you can read tables from Asana. For more information, see the [source transformation](data-flow-source.md) in mapping data flows. You can only use an [inline dataset](data-flow-source.md#inline-datasets) as source type. + +### Source transformation + +The below table lists the properties supported by Asana source. You can edit these properties in the **Source options** tab. + +| Name | Description | Required | Allowed values | Data flow script property | +| ---- | ----------- | -------- | -------------- | ---------------- | +| Workspace | The ID of the workspace in Asana. | Yes | String | workspaceId | +| Entity | The ID of the entity in Asana.| Yes | String | entityId | +| Entity Type | The type of the entity in Asana. | Yes | `teams`
    `portfolios`
    `projects` | entityType | + + +#### Asana source script examples + +When you use Asana as source type, the associated data flow script is: + +``` +source(allowSchemaDrift: true, + validateSchema: false, + store: 'asana', + format: 'rest', + workspaceId: '9876543210', + entityId: '1234567890', + entityType: 'teams') ~> AsanaSource +``` + +## Next steps + +For a list of data stores supported as sources and sinks by the copy activity, see [Supported data stores](copy-activity-overview.md#supported-data-stores-and-formats). diff --git a/articles/data-factory/continuous-integration-delivery-improvements.md b/articles/data-factory/continuous-integration-delivery-improvements.md index a58684700750..d06127179549 100644 --- a/articles/data-factory/continuous-integration-delivery-improvements.md +++ b/articles/data-factory/continuous-integration-delivery-improvements.md @@ -161,7 +161,7 @@ Follow these steps to get started: inputs: command: 'custom' workingDir: '$(Build.Repository.LocalPath)/' #replace with the package.json folder - customCommand: 'run build validate $(Build.Repository.LocalPath)/ /subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/testResourceGroup/providers/Microsoft.DataFactory/factories/' + customCommand: 'run build validate $(Build.Repository.LocalPath)/ /subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups//providers/Microsoft.DataFactory/factories/' displayName: 'Validate' # Validate and then generate the ARM template into the destination folder, which is the same as selecting "Publish" from the UX. @@ -171,7 +171,7 @@ Follow these steps to get started: inputs: command: 'custom' workingDir: '$(Build.Repository.LocalPath)/' #replace with the package.json folder - customCommand: 'run build export $(Build.Repository.LocalPath)/ /subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/testResourceGroup/providers/Microsoft.DataFactory/factories/ "ArmTemplate"' + customCommand: 'run build export $(Build.Repository.LocalPath)/ /subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups//providers/Microsoft.DataFactory/factories/ "ArmTemplate"' displayName: 'Validate and Generate ARM template' # Publish the artifact to be used as a source for a release pipeline. diff --git a/articles/data-factory/how-to-manage-settings.md b/articles/data-factory/how-to-manage-settings.md new file mode 100644 index 000000000000..07649cc4dcc2 --- /dev/null +++ b/articles/data-factory/how-to-manage-settings.md @@ -0,0 +1,68 @@ +--- +title: Managing Azure Data Factory settings and preferences +description: Learn how to manage Azure Data Factory settings and preferences. +author: n0elleli +ms.author: noelleli +ms.reviewer: +ms.service: data-factory +ms.subservice: tutorials +ms.topic: tutorial +ms.custom: seo-lt-2019 +ms.date: 05/24/2022 +--- + +# Manage Azure Data Factory settings and preferences + +[!INCLUDE[appliesto-adf-asa-md](includes/appliesto-adf-asa-md.md)] + +You can change the default settings of your Azure Data Factory to meet your own preferences. +Azure Data Factory settings are available in the Settings menu in the top right section of the global page header as indicated in the screenshot below. + +:::image type="content" source="media/how-to-manage-settings/adf-settings-1.png" alt-text="Screenshot of settings gear in top right corner of page banner."::: + +Clicking the **Settings** gear button will open a flyout. 
+ +:::image type="content" source="media/how-to-manage-settings/adf-settings-2.png" alt-text="Screenshot of settings flyout with three setting options."::: + +Here you can find the settings and preferences that you can set for your data factory. + +## Language and Region + +Choose your language and the regional format that will influence how data such as dates and currency will appear in your data factory. + +### Language + +Use the drop-down list to select from the list of available languages. This setting controls the language you see for text throughout your data factory. There are 18 languages supported in addition to English. + +:::image type="content" source="media/how-to-manage-settings/adf-settings-3.png" alt-text="Screenshot of drop-down list of languages that users can choose from."::: + +To apply changes, select a language and make sure to hit the **Apply** button. Your page will refresh and reflect the changes made. + +:::image type="content" source="media/how-to-manage-settings/adf-settings-4.png" alt-text="Screenshot of Apply button in the bottom left corner to make language changes."::: + +> [!NOTE] +> Applying language changes will discard any unsaved changes in your data factory. + +### Regional Format + +Use the drop-down list to select from the list of available regional formats. This setting controls the way dates, time, numbers, and currency are shown in your data factory. + +The default shown in **Regional format** will automatically change based on the option you selected for **Language**. You can still use the drop-down list to select a different format. + +:::image type="content" source="media/how-to-manage-settings/adf-settings-5.png" alt-text="Screenshot of drop-down list of regional formats that users can choose from. "::: + +For example, if you select **English** as your language and select **English (United States)** as the regional format, currency will be show in U.S. (United States) dollars. If you select **English** as your language and select **English (Europe)** as the regional format, currency will be show in euros. + +To apply changes, select a **Regional format** and make sure to hit the **Apply** button. Your page will refresh and reflect the changes made. + +:::image type="content" source="media/how-to-manage-settings/adf-settings-6.png" alt-text="Screenshot of Apply button in the bottom left corner to make regional format changes."::: + +> [!NOTE] +> Applying regional format changes will discard any unsaved changes in your data factory. + +## Next steps +- [Introduction to Azure Data Factory](introduction.md) +- [Build a pipeline with a copy activity](quickstart-create-data-factory-powershell.md) +- [Build a pipeline with a data transformation activity](tutorial-transform-data-spark-powershell.md) + + diff --git a/articles/data-factory/how-to-sqldb-to-cosmosdb.md b/articles/data-factory/how-to-sqldb-to-cosmosdb.md index c8d3ccb13370..f2ea5039d799 100644 --- a/articles/data-factory/how-to-sqldb-to-cosmosdb.md +++ b/articles/data-factory/how-to-sqldb-to-cosmosdb.md @@ -17,7 +17,7 @@ SQL schemas are typically modeled using third normal form, resulting in normaliz Using Azure Data Factory, we'll build a pipeline that uses a single Mapping Data Flow to read from two Azure SQL Database normalized tables that contain primary and foreign keys as the entity relationship. 
ADF will join those tables into a single stream using the data flow Spark engine, collect joined rows into arrays and produce individual cleansed documents for insert into a new Azure Cosmos DB container. -This guide will build a new container on the fly called "orders" that will use the ```SalesOrderHeader``` and ```SalesOrderDetail``` tables from the standard SQL Server [Adventure Works sample database](https://docs.microsoft.com/sql/samples/adventureworks-install-configure?view=sql-server-ver15&tabs=ssms). Those tables represent sales transactions joined by ```SalesOrderID```. Each unique detail records has its own primary key of ```SalesOrderDetailID```. The relationship between header and detail is ```1:M```. We'll join on ```SalesOrderID``` in ADF and then roll each related detail record into an array called "detail". +This guide will build a new container on the fly called "orders" that will use the ```SalesOrderHeader``` and ```SalesOrderDetail``` tables from the standard SQL Server [Adventure Works sample database](/sql/samples/adventureworks-install-configure?tabs=ssms&view=sql-server-ver15). Those tables represent sales transactions joined by ```SalesOrderID```. Each unique detail record has its own primary key of ```SalesOrderDetailID```. The relationship between header and detail is ```1:M```. We'll join on ```SalesOrderID``` in ADF and then roll each related detail record into an array called "detail". The representative SQL query for this guide is: @@ -101,4 +101,4 @@ If everything looks good, you are now ready to create a new pipeline, add this d ## Next steps * Build the rest of your data flow logic by using mapping data flows [transformations](concepts-data-flow-overview.md). -* [Download the completed pipeline template](https://github.com/kromerm/adfdataflowdocs/blob/master/sampledata/SQL%20Orders%20to%20CosmosDB.zip) for this tutorial and import the template into your factory. +* [Download the completed pipeline template](https://github.com/kromerm/adfdataflowdocs/blob/master/sampledata/SQL%20Orders%20to%20CosmosDB.zip) for this tutorial and import the template into your factory. 
\ No newline at end of file diff --git a/articles/data-factory/media/connector-asana/asana-connector.png b/articles/data-factory/media/connector-asana/asana-connector.png new file mode 100644 index 000000000000..c9cf6ac3c55d Binary files /dev/null and b/articles/data-factory/media/connector-asana/asana-connector.png differ diff --git a/articles/data-factory/media/connector-asana/configure-asana-linked-service.png b/articles/data-factory/media/connector-asana/configure-asana-linked-service.png new file mode 100644 index 000000000000..723554fbdc18 Binary files /dev/null and b/articles/data-factory/media/connector-asana/configure-asana-linked-service.png differ diff --git a/articles/data-factory/media/how-to-manage-settings/adf-settings-1.png b/articles/data-factory/media/how-to-manage-settings/adf-settings-1.png new file mode 100644 index 000000000000..eba788afc8a2 Binary files /dev/null and b/articles/data-factory/media/how-to-manage-settings/adf-settings-1.png differ diff --git a/articles/data-factory/media/how-to-manage-settings/adf-settings-2.png b/articles/data-factory/media/how-to-manage-settings/adf-settings-2.png new file mode 100644 index 000000000000..d6b54cb71c17 Binary files /dev/null and b/articles/data-factory/media/how-to-manage-settings/adf-settings-2.png differ diff --git a/articles/data-factory/media/how-to-manage-settings/adf-settings-3.png b/articles/data-factory/media/how-to-manage-settings/adf-settings-3.png new file mode 100644 index 000000000000..e8ef328a6a9f Binary files /dev/null and b/articles/data-factory/media/how-to-manage-settings/adf-settings-3.png differ diff --git a/articles/data-factory/media/how-to-manage-settings/adf-settings-4.png b/articles/data-factory/media/how-to-manage-settings/adf-settings-4.png new file mode 100644 index 000000000000..bd31b1a9e954 Binary files /dev/null and b/articles/data-factory/media/how-to-manage-settings/adf-settings-4.png differ diff --git a/articles/data-factory/media/how-to-manage-settings/adf-settings-5.png b/articles/data-factory/media/how-to-manage-settings/adf-settings-5.png new file mode 100644 index 000000000000..7f086881071e Binary files /dev/null and b/articles/data-factory/media/how-to-manage-settings/adf-settings-5.png differ diff --git a/articles/data-factory/media/how-to-manage-settings/adf-settings-6.png b/articles/data-factory/media/how-to-manage-settings/adf-settings-6.png new file mode 100644 index 000000000000..4bda0197f602 Binary files /dev/null and b/articles/data-factory/media/how-to-manage-settings/adf-settings-6.png differ diff --git a/articles/data-factory/monitor-metrics-alerts.md b/articles/data-factory/monitor-metrics-alerts.md index 0d959d8da9d6..7a2c874d48ce 100644 --- a/articles/data-factory/monitor-metrics-alerts.md +++ b/articles/data-factory/monitor-metrics-alerts.md @@ -52,7 +52,7 @@ Here are some of the metrics emitted by Azure Data Factory version 2. | Total entities count | Total number of entities | Count | Total | The total number of entities in the Azure Data Factory instance. | | Total factory size (GB unit) | Total size of entities | Gigabyte | Total | The total size of entities in the Azure Data Factory instance. | -For service limits and quotas please see [quotas and limits](https://docs.microsoft.com/azure/azure-resource-manager/management/azure-subscription-service-limits#azure-data-factory-limits). +For service limits and quotas please see [quotas and limits](../azure-resource-manager/management/azure-subscription-service-limits.md#azure-data-factory-limits). 
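Once metrics are being emitted, one way to read them outside the portal is through the Azure Monitor cmdlets. The following is a rough sketch, assuming the Az.Monitor PowerShell module; the resource ID and metric name shown are placeholders to replace with your own values.

```powershell
# Rough sketch: read a single Azure Data Factory metric from Azure Monitor.
# The resource ID and metric name below are placeholders.
$resourceId = "/subscriptions/<subscription-id>/resourceGroups/<resource-group>/providers/Microsoft.DataFactory/factories/<factory-name>"

Get-AzMetric -ResourceId $resourceId `
    -MetricName "PipelineFailedRuns" `
    -StartTime (Get-Date).AddHours(-24) `
    -EndTime (Get-Date) `
    -TimeGrain 01:00:00 `
    -AggregationType Total
```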
To access the metrics, complete the instructions in [Azure Monitor data platform](../azure-monitor/data-platform.md). > [!NOTE] @@ -97,4 +97,4 @@ Sign in to the Azure portal, and select **Monitor** > **Alerts** to create alert ## Next steps -[Configure diagnostics settings and workspace](monitor-configure-diagnostics.md) +[Configure diagnostics settings and workspace](monitor-configure-diagnostics.md) \ No newline at end of file diff --git a/articles/data-factory/v1/data-factory-create-data-factories-programmatically.md index b8619010fc69..cc2cacbbdaef 100644 --- a/articles/data-factory/v1/data-factory-create-data-factories-programmatically.md +++ b/articles/data-factory/v1/data-factory-create-data-factories-programmatically.md @@ -102,7 +102,7 @@ In the walkthrough, you create a data factory with a pipeline that contains a co The Copy Activity performs the data movement in Azure Data Factory. The activity is powered by a globally available service that can copy data between various data stores in a secure, reliable, and scalable way. See [Data Movement Activities](data-factory-data-movement-activities.md) article for details about the Copy Activity. > [!IMPORTANT] -> The [Microsoft.IdentityModel.Clients.ActiveDirectory](https://www.nuget.org/packages/Microsoft.IdentityModel.Clients.ActiveDirectory) NuGet package and Azure AD Authentication Library (ADAL) have been deprecated. No new features have been added since June 30, 2020. We strongly encourage you to upgrade, see the [migration guide](/azure/active-directory/develop/msal-migration) for more details. +> The [Microsoft.IdentityModel.Clients.ActiveDirectory](https://www.nuget.org/packages/Microsoft.IdentityModel.Clients.ActiveDirectory) NuGet package and Azure AD Authentication Library (ADAL) have been deprecated. No new features have been added since June 30, 2020. We strongly encourage you to upgrade; see the [migration guide](../../active-directory/develop/msal-migration.md) for more details.
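The walkthrough that follows stays on the .NET SDK, but as a hedged illustration of the MSAL pattern the note points to, here is a minimal Python sketch of the client-credentials flow with the `msal` package; the tenant ID, client ID, and secret are placeholders, not values from the article:

```python
import msal

# Placeholder values; register your own Azure AD application and substitute these.
TENANT_ID = "<tenant-id>"
CLIENT_ID = "<application-id>"
CLIENT_SECRET = "<client-secret>"

app = msal.ConfidentialClientApplication(
    CLIENT_ID,
    authority=f"https://login.microsoftonline.com/{TENANT_ID}",
    client_credential=CLIENT_SECRET,
)

# Acquire a token for Azure Resource Manager, which Data Factory management calls go through.
result = app.acquire_token_for_client(scopes=["https://management.azure.com/.default"])

if "access_token" in result:
    print("Token acquired; expires in", result["expires_in"], "seconds")
else:
    print("Token request failed:", result.get("error"), result.get("error_description"))
```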
@@ -498,4 +498,4 @@ while (response != null); ## Next steps See the following example for creating a pipeline using .NET SDK that copies data from an Azure blob storage to Azure SQL Database: -- [Create a pipeline to copy data from Blob Storage to SQL Database](data-factory-copy-activity-tutorial-using-dotnet-api.md) +- [Create a pipeline to copy data from Blob Storage to SQL Database](data-factory-copy-activity-tutorial-using-dotnet-api.md) \ No newline at end of file diff --git a/articles/databox-online/TOC.yml b/articles/databox-online/TOC.yml index 15116b98e0ba..e3bb8f2a2295 100644 --- a/articles/databox-online/TOC.yml +++ b/articles/databox-online/TOC.yml @@ -434,8 +434,11 @@ href: azure-stack-edge-gpu-activation-key-vault.md - name: Understand disconnected use href: azure-stack-edge-gpu-disconnected-scenario.md + - name: FAQ - Operational guidelines + href: azure-stack-edge-operational-guidelines-faq.yml - name: Understand data resiliency href: azure-stack-edge-gpu-data-resiliency.md + - name: Shared security items: - name: Security overview diff --git a/articles/databox-online/azure-stack-edge-gpu-connect-resource-manager.md b/articles/databox-online/azure-stack-edge-gpu-connect-resource-manager.md index 82bf03299da0..5338ba1f7bbe 100644 --- a/articles/databox-online/azure-stack-edge-gpu-connect-resource-manager.md +++ b/articles/databox-online/azure-stack-edge-gpu-connect-resource-manager.md @@ -458,7 +458,7 @@ Set the Azure Resource Manager environment and verify that your device to client ---- -------------------- ------------------------- AzASE https://management.myasegpu.wdshcsso.com/ https://login.myasegpu.wdshcsso.c... ``` - For more information, go to [Set-AzEnvironment](/powershell/module/azurerm.profile/set-azurermenvironment?view=azurermps-6.13.0&preserve-view=true). + For more information, go to [Set-AzEnvironment](/powershell/module/az.accounts/set-azenvironment?view=azps-7.5.0). - Define the environment inline for every cmdlet that you execute. This ensures that all the API calls are going through the correct environment. By default, the calls would go through the Azure public but you want these to go through the environment that you set for Azure Stack Edge device. diff --git a/articles/databox-online/azure-stack-edge-gpu-deploy-install.md b/articles/databox-online/azure-stack-edge-gpu-deploy-install.md index 8ddf5f712663..cab2c40a78f2 100644 --- a/articles/databox-online/azure-stack-edge-gpu-deploy-install.md +++ b/articles/databox-online/azure-stack-edge-gpu-deploy-install.md @@ -7,7 +7,7 @@ author: alkohli ms.service: databox ms.subservice: edge ms.topic: tutorial -ms.date: 11/11/2021 +ms.date: 05/17/2022 ms.author: alkohli zone_pivot_groups: azure-stack-edge-device-deployment # Customer intent: As an IT admin, I need to understand how to install Azure Stack Edge Pro in datacenter so I can use it to transfer data to Azure. @@ -196,7 +196,7 @@ Before you start cabling your device, you need the following things: - At least one 1-GbE RJ-45 network cable to connect to the management interface. There are two 1-GbE network interfaces, one management and one data, on the device. - One 25/10-GbE SFP+ copper cable for each data network interface to be configured. At least one data network interface from among PORT 2, PORT 3, PORT 4, PORT 5, or PORT 6 needs to be connected to the Internet (with connectivity to Azure). - Access to two power distribution units (recommended). -- At least one 1-GbE network switch to connect a 1-GbE network interface to the Internet for data. 
The local web UI will not be accessible if the connected switch is not at least 1 GbE. If using 25/10-GbE interface for data, you will need a 25-GbE or 10-GbE switch. +- At least one 1-GbE network switch to connect a 1-GbE network interface to the Internet for data. The local web UI won't be accessible if the connected switch isn't at least 1 GbE. If using a 25/10-GbE interface for data, you'll need a 25-GbE or 10-GbE switch. > [!NOTE] > - If you are connecting only one data network interface, we recommend that you use a 25/10-GbE network interface such as PORT 3, PORT 4, PORT 5, or PORT 6 to send data to Azure. @@ -210,10 +210,10 @@ Before you start cabling your device, you need the following things: Before you start cabling your device, you need the following things: - Both of your Azure Stack Edge physical devices, unpacked, and rack mounted. -- 4 power cables, 2 for each device node. +- Four power cables, two for each device node. - At least two 1-GbE RJ-45 network cables to connect Port 1 on each device node for initial configuration. - At least two 1-GbE RJ-45 network cables to connect Port 2 on each device node to the internet (with connectivity to Azure). -- 25/10-GbE SFP+ copper cables for Port 3 and Port 4 to be configured. Additional 25/10-GbR SFP+ copper cables if you will also connect Port 5 and Port 6. Port 5 and Port 6 must be connected if you intend to [Deploy network functions on Azure Stack Edge](../network-function-manager/deploy-functions.md). +- 25/10-GbE SFP+ copper cables for Port 3 and Port 4 to be configured. Additional 25/10-GbE SFP+ copper cables if you'll also connect Port 5 and Port 6. Port 5 and Port 6 must be connected if you intend to [Deploy network functions on Azure Stack Edge](../network-function-manager/deploy-functions.md). - 25-GbE or 10-GbE switches if opting for a switched network topology. See [Supported network topologies](azure-stack-edge-gpu-clustering-overview.md). - Access to two power distribution units (recommended). @@ -253,7 +253,10 @@ The backplane of Azure Stack Edge device: For a full list of supported cables, switches, and transceivers for these network adapter cards, see: - [`Qlogic` Cavium 25G NDC adapter interoperability matrix](https://www.marvell.com/documents/xalflardzafh32cfvi0z/). -- 25 GbE and 10 GbE cables and modules in [Mellanox dual port 25G ConnectX-4 channel network adapter compatible products](https://docs.mellanox.com/display/ConnectX4LxFirmwarev14271016/Firmware+Compatible+Products). +- 25 GbE and 10 GbE cables and modules in [Mellanox dual port 25G ConnectX-4 channel network adapter compatible products](https://docs.mellanox.com/display/ConnectX4LxFirmwarev14271016/Firmware+Compatible+Products). + +> [!NOTE] +> Using USB ports to connect any external device, including keyboards and monitors, is not supported for Azure Stack Edge devices. ### Power cabling @@ -350,7 +353,7 @@ Use this configuration when you need port level redundancy through teaming. #### Connect Port 3 via switch -Use this configuration if you need an extra port for workload traffic and port level redundancy is not required. +Use this configuration if you need an extra port for workload traffic and port level redundancy isn't required.
![Back plane of clustered device cabled for networking with switches and without NIC teaming](./media/azure-stack-edge-gpu-deploy-install/backplane-clustered-device-networking-switches-without-nic-teaming.png) diff --git a/articles/databox-online/azure-stack-edge-gpu-deploy-set-up-device-update-time.md b/articles/databox-online/azure-stack-edge-gpu-deploy-set-up-device-update-time.md index 1c3a81620f32..cb6deceaab0f 100644 --- a/articles/databox-online/azure-stack-edge-gpu-deploy-set-up-device-update-time.md +++ b/articles/databox-online/azure-stack-edge-gpu-deploy-set-up-device-update-time.md @@ -7,30 +7,17 @@ author: alkohli ms.service: databox ms.subservice: edge ms.topic: tutorial -ms.date: 02/15/2022 +ms.date: 05/24/2022 ms.author: alkohli -zone_pivot_groups: azure-stack-edge-device-deployment # Customer intent: As an IT admin, I need to understand how to connect and activate Azure Stack Edge Pro so I can use it to transfer data to Azure. --- # Tutorial: Configure the device settings for Azure Stack Edge Pro GPU -::: zone pivot="single-node" - -This tutorial describes how to configure device related settings for your 1-node Azure Stack Edge Pro GPU device. You can set up your device name, update server, and time server via the local web UI. +This tutorial describes how to configure device related settings for your Azure Stack Edge Pro GPU device. You can set up your device name, update server, and time server via the local web UI. The device settings can take around 5-7 minutes to complete. -::: zone-end - -::: zone pivot="two-node" - -This tutorial describes how to configure device related settings for your 2-node Azure Stack Edge Pro GPU device. You can set up your device name, update server, and time server via the local web UI. - -The device settings can take around 5-7 minutes to complete. - -::: zone-end - In this tutorial, you learn about: > [!div class="checklist"] @@ -74,13 +61,6 @@ Follow these steps to configure device related settings: ![Local web UI "Device" page 3](./media/azure-stack-edge-gpu-deploy-set-up-device-update-time/device-4.png) -::: zone pivot="two-node" - -Repeat all the above steps for the second node of your device. Make sure that the same DNS domain is used for both the nodes. - -::: zone-end - - ## Configure update 1. On the **Update** page, you can now configure the location from where to download the updates for your device. @@ -99,13 +79,6 @@ Repeat all the above steps for the second node of your device. Make sure that th 1. Select **Apply**. 1. After the update server is configured, select **Next: Time**. -::: zone pivot="two-node" - -Repeat all the above steps for the second node of your device. Make sure that the same update server is used for both the nodes. - -::: zone-end - - ## Configure time Follow these steps to configure time settings on your device. @@ -131,12 +104,6 @@ NTP servers are required because your device must synchronize time so that it ca 1. After the settings are applied, select **Next: Certificates**. -::: zone pivot="two-node" - -Repeat all the above steps for the second node of your device. Make sure that the same NTP server is used for both the nodes. 
- -::: zone-end - ## Next steps In this tutorial, you learn about: diff --git a/articles/databox-online/azure-stack-edge-mini-r-deploy-install.md b/articles/databox-online/azure-stack-edge-mini-r-deploy-install.md index e49f57906850..528f624c4a61 100644 --- a/articles/databox-online/azure-stack-edge-mini-r-deploy-install.md +++ b/articles/databox-online/azure-stack-edge-mini-r-deploy-install.md @@ -7,7 +7,7 @@ author: alkohli ms.service: databox ms.subservice: edge ms.topic: tutorial -ms.date: 03/22/2021 +ms.date: 05/17/2022 ms.author: alkohli # Customer intent: As an IT admin, I need to understand how to install Azure Stack Edge Mini R device in datacenter so I can use it to transfer data to Azure. --- @@ -150,6 +150,9 @@ Take the following steps to cable your device for power and network. - If connecting PORT 2, use the RJ-45 network cable. - For the 10-GbE network interfaces, use the SFP+ copper cables. + > [!NOTE] + > Using USB ports to connect any external device, including keyboards and monitors, is not supported for Azure Stack Edge devices. + ## Next steps In this tutorial, you learned about Azure Stack Edge topics such as how to: diff --git a/articles/databox-online/azure-stack-edge-operational-guidelines-faq.yml b/articles/databox-online/azure-stack-edge-operational-guidelines-faq.yml new file mode 100644 index 000000000000..006f8811ec0d --- /dev/null +++ b/articles/databox-online/azure-stack-edge-operational-guidelines-faq.yml @@ -0,0 +1,71 @@ +### YamlMime:FAQ +metadata: + title: Azure Stack Edge operational guidelines FAQ + description: Contains frequently asked questions and answers about Azure Stack Edge operations. + services: databox + author: alkohli + + ms.service: databox + ms.topic: faq + ms.date: 05/16/2022 + ms.author: alkohli + +title: "Azure Stack Edge operations: Frequently asked questions" +summary: | + Use the following operational guidelines to learn about using Azure Stack Edge in the Azure portal. + +sections: + - name: Ignored + questions: + - question: | + Who is responsible for the initial delivery of the device to the customer location? + answer: | + Microsoft will deliver your device using the contact details and shipping address provided in the order. + + - question: | + Who is responsible for the ongoing operation of the device? + answer: | + The customer is responsible for the day-to-day operation of the device, including: + - Power, network, storage, and peripheral device operation. + - Software operations, like application deployment, Kubernetes cluster operations, clustering, and virtual machine management. + - The device will be located on the customer's premises during regular operation, while the subscription is active. + - To create a support ticket with Microsoft, see the section below to [open a support ticket](#how-do-i-open-a-support-ticket-with-microsoft-). + + - question: | + What if the device is lost, damaged, or stolen while it's on-premises? + answer: | + If your device is lost, damaged, or stolen, you're responsible for promptly informing Microsoft and paying a fee. For more information, see the frequently asked questions on the [Azure Stack Edge pricing page](https://azure.microsoft.com/pricing/details/azure-stack/edge/). Once you submit a new order in the Azure portal, Microsoft may deliver a replacement device to you. + + - question: | + Who manages regular updates to enhance and improve the Azure Stack Edge platform? 
+ answer: | + Microsoft releases periodic updates for firmware, BIOS, drivers, the Kubernetes service, and other software components. Software patches may be made available by Microsoft to address vulnerabilities and bugs. When updates are available, you initiate installation at a time that's convenient for you. + For more information about updates for your device, see [Update your Azure Stack Edge Pro GPU](./azure-stack-edge-gpu-install-update.md?tabs=version-2106-and-later). + + - question: | + Who fixes software issues on my applications that run on Azure Stack Edge? + answer: | + You're responsible for fixing issues in the applications that you deploy on the Azure Stack Edge platform, even if you're consuming services like Kubernetes that are provided by the platform. To create a support ticket with Microsoft, see the section below to [open a support ticket](#how-do-i-open-a-support-ticket-with-microsoft-). + + - question: | + How do I replace an Azure Stack Edge device if there's a hardware failure? + answer: | + If Microsoft determines the device is faulty, Microsoft will arrange for replacement and delivery of devices. If you [Return your Azure Stack Edge device](azure-stack-edge-return-device.md?tabs=azure-edge-hardware-center), Microsoft will process the return of your device. + + - question: | + How do I open a support ticket with Microsoft? + answer: | + For issues that you can't address in-house, [open a support ticket](azure-stack-edge-contact-microsoft-support.md), and Microsoft will assess your questions. + + - For Azure Stack Edge and Azure Data Box Gateway issues like network interfaces, disk drives, or firmware, the ticket is assigned to Microsoft Support. + - If the device is faulty, damaged, or lost, and loss isn't the customer's fault, Microsoft may: + - Send a field support person to address the issue, or + - Replace the device. For more information, see the frequently asked questions section on the [Azure Stack Edge pricing page](https://azure.microsoft.com/pricing/details/azure-stack/edge/). + - If you have a software issue with your Kubernetes service that you can't fix yourself, or if you have a virtual machine management question that is not already documented, the Microsoft support team will consult with you to triage the issue and debug it remotely. + - To address software issues in the Azure Stack Edge platform or services that run on it, Microsoft may work with you directly to provide a fix or workaround, or they might make a fix available via a software update. For more information about software updates for your device, see [Update your Azure Stack Edge Pro GPU](./azure-stack-edge-gpu-install-update.md?tabs=version-2106-and-later). + +additionalContent: | + + ## Next steps + + - Learn about [troubleshooting Azure Stack Edge device issues](azure-stack-edge-gpu-troubleshoot.md).
\ No newline at end of file diff --git a/articles/databox-online/azure-stack-edge-pro-2-deploy-install.md b/articles/databox-online/azure-stack-edge-pro-2-deploy-install.md index e14d36576f08..5a0e46aed69c 100644 --- a/articles/databox-online/azure-stack-edge-pro-2-deploy-install.md +++ b/articles/databox-online/azure-stack-edge-pro-2-deploy-install.md @@ -7,7 +7,7 @@ author: alkohli ms.service: databox ms.subservice: edge ms.topic: tutorial -ms.date: 03/22/2022 +ms.date: 05/17/2022 ms.author: alkohli zone_pivot_groups: azure-stack-edge-device-deployment # Customer intent: As an IT admin, I need to understand how to install Azure Stack Edge Pro 2 in datacenter so I can use it to transfer data to Azure. @@ -334,6 +334,9 @@ Follow these steps to cable your device for network: ![Back plane of a cabled device](./media/azure-stack-edge-pro-2-deploy-install/cabled-backplane-1.png) + > [!NOTE] + > Using USB ports to connect any external device, including keyboards and monitors, is not supported for Azure Stack Edge devices. + ::: zone-end ::: zone pivot="two-node" @@ -358,6 +361,8 @@ Cable your device as shown in the following diagram: 1. Connect Port 3 on one device directly (without a switch) to the Port 3 on the other device node. Use a QSFP28 passive direct attached cable (tested in-house) for the connection. 1. Connect Port 4 on one device directly (without a switch) to the Port 4 on the other device node. Use a QSFP28 passive direct attached cable (tested in-house) for the connection. + > [!NOTE] + > Using USB ports to connect any external device, including keyboards and monitors, is not supported for Azure Stack Edge devices. #### Using external switches diff --git a/articles/databox-online/azure-stack-edge-pro-r-deploy-install.md b/articles/databox-online/azure-stack-edge-pro-r-deploy-install.md index 9335c24c2b67..6fe5c99af1c8 100644 --- a/articles/databox-online/azure-stack-edge-pro-r-deploy-install.md +++ b/articles/databox-online/azure-stack-edge-pro-r-deploy-install.md @@ -7,7 +7,7 @@ author: alkohli ms.service: databox ms.subservice: edge ms.topic: tutorial -ms.date: 3/22/2022 +ms.date: 5/17/2022 ms.author: alkohli # Customer intent: As an IT admin, I need to understand how to install Azure Stack Edge Pro R in datacenter so I can use it to transfer data to Azure. --- @@ -128,6 +128,9 @@ Take the following steps to cable your device for power and network. - If connecting PORT 2, use the RJ-45 network cable. - For the 10/25-GbE network interfaces, use the SFP+ copper cables. + > [!NOTE] + > Using USB ports to connect any external device, including keyboards and monitors, is not supported for Azure Stack Edge devices. 
+ ## Next steps In this tutorial, you learned about Azure Stack Edge Pro R topics such as how to: diff --git a/articles/defender-for-cloud/TOC.yml b/articles/defender-for-cloud/TOC.yml index f17ae41955d1..9953ccf10963 100644 --- a/articles/defender-for-cloud/TOC.yml +++ b/articles/defender-for-cloud/TOC.yml @@ -390,8 +390,32 @@ href: https://techcommunity.microsoft.com/t5/microsoft-defender-for-cloud/bg-p/MicrosoftDefenderCloudBlog - name: Microsoft Defender for Cloud on Stack Overflow href: https://stackoverflow.com/search?q=microsoft+defender+for+cloud - - name: MDfCinTheField YouTube videos - href: https://www.youtube.com/hashtag/mdfcinthefield + - name: Microsoft Defender for Cloud in the field + items: + - name: New AWS connector + href: episode-one.md + - name: Integrate with Azure Purview + href: episode-two.md + - name: Microsoft Defender for Containers + href: episode-three.md + - name: Security posture management improvements + href: episode-four.md + - name: Microsoft Defender for Servers + href: episode-five.md + - name: Lessons learned from the field + href: episode-six.md + - name: New GCP connector + href: episode-seven.md + - name: Microsoft Defender for IoT + href: episode-eight.md + - name: Microsoft Defender for Containers in a multi-cloud environment + href: episode-nine.md + - name: Protecting containers in GCP with Defender for Containers + href: episode-ten.md + - name: Threat landscape for containers + href: episode-eleven.md + - name: Enhanced workload protection features in Defender for Servers + href: episode-twelve.md - name: Pricing href: https://azure.microsoft.com/pricing/details/azure-defender/ - name: Regional availability diff --git a/articles/defender-for-cloud/defender-for-cloud-introduction.md b/articles/defender-for-cloud/defender-for-cloud-introduction.md index 0bc435d06ebd..ab27518fbabb 100644 --- a/articles/defender-for-cloud/defender-for-cloud-introduction.md +++ b/articles/defender-for-cloud/defender-for-cloud-introduction.md @@ -5,7 +5,7 @@ ms.topic: overview ms.author: benmansheim author: bmansheim ms.custom: mvc -ms.date: 05/11/2022 +ms.date: 05/19/2022 --- # What is Microsoft Defender for Cloud? @@ -145,6 +145,16 @@ Use the advanced protection tiles in the [workload protections dashboard](worklo > [!TIP] > Microsoft Defender for IoT is a separate product. You'll find all the details in [Introducing Microsoft Defender for IoT](../defender-for-iot/overview.md). +## Learn More + +If you would like to learn more about Defender for Cloud from a cybersecurity expert, check out [Lessons Learned from the Field](episode-six.md). + +You can also check out the following blogs: + +- [A new name for multi-cloud security: Microsoft Defender for Cloud](https://techcommunity.microsoft.com/t5/microsoft-defender-for-cloud/a-new-name-for-multi-cloud-security-microsoft-defender-for-cloud/ba-p/2943020) +- [Microsoft Defender for Cloud - Use cases](https://techcommunity.microsoft.com/t5/microsoft-defender-for-cloud/microsoft-defender-for-cloud-use-cases/ba-p/2953619) +- [Microsoft Defender for Cloud PoC Series - Microsoft Defender for Containers](https://techcommunity.microsoft.com/t5/microsoft-defender-for-cloud/microsoft-defender-for-cloud-poc-series-microsoft-defender-for/ba-p/3064644) + ## Next steps - To get started with Defender for Cloud, you need a subscription to Microsoft Azure. If you don't have a subscription, [sign up for a free trial](https://azure.microsoft.com/free/). 
diff --git a/articles/defender-for-cloud/defender-for-containers-enable.md b/articles/defender-for-cloud/defender-for-containers-enable.md index 5b4a95f4440e..1da0079aac6e 100644 --- a/articles/defender-for-cloud/defender-for-containers-enable.md +++ b/articles/defender-for-cloud/defender-for-containers-enable.md @@ -3,7 +3,7 @@ title: How to enable Microsoft Defender for Containers in Microsoft Defender for description: Enable the container protections of Microsoft Defender for Containers ms.topic: overview zone_pivot_groups: k8s-host -ms.date: 05/10/2022 +ms.date: 05/26/2022 --- # Enable Microsoft Defender for Containers @@ -25,7 +25,7 @@ Learn about this plan in [Overview of Microsoft Defender for Containers](defende ::: zone pivot="defender-for-container-arc,defender-for-container-eks,defender-for-container-gke" > [!NOTE] > Defender for Containers' support for Arc-enabled Kubernetes clusters, AWS EKS, and GCP GKE. This is a preview feature. -> +> > [!INCLUDE [Legalese](../../includes/defender-for-cloud-preview-legal-text.md)] ::: zone-end @@ -70,7 +70,7 @@ A full list of supported alerts is available in the [reference table of all Defe 1. In the Azure portal, open Microsoft Defender for Cloud's security alerts page and look for the alert on the relevant resource: :::image type="content" source="media/defender-for-kubernetes-azure-arc/sample-kubernetes-security-alert.png" alt-text="Sample alert from Microsoft Defender for Kubernetes." lightbox="media/defender-for-kubernetes-azure-arc/sample-kubernetes-security-alert.png"::: - + ::: zone pivot="defender-for-container-arc,defender-for-container-eks,defender-for-container-gke" [!INCLUDE [Remove the extension](./includes/defender-for-containers-remove-extension.md)] ::: zone-end @@ -87,6 +87,17 @@ A full list of supported alerts is available in the [reference table of all Defe [!INCLUDE [FAQ](./includes/defender-for-containers-override-faq.md)] ::: zone-end +## Learn More + +Learn more from the product manager about [Microsoft Defender for Containers in a multi-cloud environment](episode-nine.md). +You can also learn how to [Protect Containers in GCP with Defender for Containers](episode-ten.md). + +You can also check out the following blogs: + +- [Protect your Google Cloud workloads with Microsoft Defender for Cloud](https://techcommunity.microsoft.com/t5/microsoft-defender-for-cloud/protect-your-google-cloud-workloads-with-microsoft-defender-for/ba-p/3073360) +- [Introducing Microsoft Defender for Containers](https://techcommunity.microsoft.com/t5/microsoft-defender-for-cloud/introducing-microsoft-defender-for-containers/ba-p/2952317) +- [A new name for multi-cloud security: Microsoft Defender for Cloud](https://techcommunity.microsoft.com/t5/microsoft-defender-for-cloud/a-new-name-for-multi-cloud-security-microsoft-defender-for-cloud/ba-p/2943020) + ## Next steps -[Use Defender for Containers to scan your ACR images for vulnerabilities](defender-for-container-registries-usage.md). \ No newline at end of file +[Use Defender for Containers to scan your ACR images for vulnerabilities](defender-for-container-registries-usage.md). 
diff --git a/articles/defender-for-cloud/defender-for-containers-introduction.md b/articles/defender-for-cloud/defender-for-containers-introduction.md index 64a83f8dbde1..d36ae3a16e65 100644 --- a/articles/defender-for-cloud/defender-for-containers-introduction.md +++ b/articles/defender-for-cloud/defender-for-containers-introduction.md @@ -2,7 +2,7 @@ title: Container security with Microsoft Defender for Cloud description: Learn about Microsoft Defender for Containers ms.topic: overview -ms.date: 05/15/2022 +ms.date: 05/25/2022 --- # Overview of Microsoft Defender for Containers @@ -26,7 +26,7 @@ On this page, you'll learn how you can use Defender for Containers to improve, m Defender for Containers helps with the core aspects of container security: -- **Environment hardening** - Defender for Containers protects your Kubernetes clusters whether they're running on Azure Kubernetes Service, Kubernetes on-prem / IaaS, or Amazon EKS. By continuously assessing clusters, Defender for Containers provides visibility into misconfigurations and guidelines to help mitigate identified threats. Learn more in [Hardening](#hardening). +- **Environment hardening** - Defender for Containers protects your Kubernetes clusters whether they're running on Azure Kubernetes Service, Kubernetes on-premises / IaaS, or Amazon EKS. By continuously assessing clusters, Defender for Containers provides visibility into misconfigurations and guidelines to help mitigate identified threats. Learn more in [Hardening](#hardening). - **Vulnerability assessment** - Vulnerability assessment and management tools for images **stored** in ACR registries and **running** in Azure Kubernetes Service. Learn more in [Vulnerability assessment](#vulnerability-assessment). @@ -195,15 +195,15 @@ The following describes the components necessary in order to receive the full pr ## FAQ - Defender for Containers - [What are the options to enable the new plan at scale?](#what-are-the-options-to-enable-the-new-plan-at-scale) -- [Does Microsoft Defender for Containers support AKS clusters with virtual machines scale set (VMSS)?](#does-microsoft-defender-for-containers-support-aks-clusters-with-virtual-machines-scale-set-vmss) +- [Does Microsoft Defender for Containers support AKS clusters with virtual machines scale set?](#does-microsoft-defender-for-containers-support-aks-clusters-with-virtual-machines-scale-set) - [Does Microsoft Defender for Containers support AKS without scale set (default)?](#does-microsoft-defender-for-containers-support-aks-without-scale-set-default) - [Do I need to install the Log Analytics VM extension on my AKS nodes for security protection?](#do-i-need-to-install-the-log-analytics-vm-extension-on-my-aks-nodes-for-security-protection) ### What are the options to enable the new plan at scale? We’ve rolled out a new policy in Azure Policy, **Configure Microsoft Defender for Containers to be enabled**, to make it easier to enable the new plan at scale. -### Does Microsoft Defender for Containers support AKS clusters with virtual machines scale set (VMSS)? -Yes. +### Does Microsoft Defender for Containers support AKS clusters with virtual machines scale set? +Yes ### Does Microsoft Defender for Containers support AKS without scale set (default)? No. Only Azure Kubernetes Service (AKS) clusters that use virtual machine scale sets for the nodes is supported. @@ -211,6 +211,15 @@ No. 
Only Azure Kubernetes Service (AKS) clusters that use virtual machine scale ### Do I need to install the Log Analytics VM extension on my AKS nodes for security protection? No, AKS is a managed service, and manipulation of the IaaS resources isn't supported. The Log Analytics VM extension is not needed and may result in additional charges. +## Learn More + +If you would like to learn more from the product manager about Microsoft Defender for Containers, check out [Microsoft Defender for Containers](episode-three.md). + +You can also check out the following blogs: + +- [How to demonstrate the new containers features in Microsoft Defender for Cloud](https://techcommunity.microsoft.com/t5/microsoft-defender-for-cloud/how-to-demonstrate-the-new-containers-features-in-microsoft/ba-p/3281172) +- [Introducing Microsoft Defender for Containers](https://techcommunity.microsoft.com/t5/microsoft-defender-for-cloud/introducing-microsoft-defender-for-containers/ba-p/2952317) + ## Next steps In this overview, you learned about the core elements of container security in Microsoft Defender for Cloud. To enable the plan, see: diff --git a/articles/defender-for-cloud/defender-for-servers-introduction.md b/articles/defender-for-cloud/defender-for-servers-introduction.md index b6f0374a50a2..2116bd60387f 100644 --- a/articles/defender-for-cloud/defender-for-servers-introduction.md +++ b/articles/defender-for-cloud/defender-for-servers-introduction.md @@ -1,10 +1,8 @@ --- title: Microsoft Defender for Servers - the benefits and features description: Learn about the benefits and features of Microsoft Defender for Servers. -ms.date: 03/28/2022 +ms.date: 05/11/2022 ms.topic: overview -ms.author: benmansheim -author: bmansheim --- # Introduction to Microsoft Defender for Servers @@ -128,8 +126,15 @@ You can simulate alerts by downloading one of the following playbooks: - For Linux: [Microsoft Defender for Cloud Playbook: Linux Detections](https://github.com/Azure/Azure-Security-Center/blob/master/Simulations/Azure%20Security%20Center%20Linux%20Detections_v2.pdf). +## Learn more +If you would like to learn more from the product manager about Defender for Servers, check out [Microsoft Defender for Servers](episode-five.md). You can also learn about the [Enhanced workload protection features in Defender for Servers](episode-twelve.md). 
+You can also check out the following blogs: + +- [Security posture management and server protection for AWS and GCP are now generally available](https://techcommunity.microsoft.com/t5/microsoft-defender-for-cloud/security-posture-management-and-server-protection-for-aws-and/ba-p/3271388) + +- [Microsoft Defender for Cloud Server Monitoring Dashboard](https://techcommunity.microsoft.com/t5/microsoft-defender-for-cloud/microsoft-defender-for-cloud-server-monitoring-dashboard/ba-p/2869658) ## Next steps diff --git a/articles/defender-for-cloud/deploy-vulnerability-assessment-tvm.md b/articles/defender-for-cloud/deploy-vulnerability-assessment-tvm.md index 69d8a59c22f2..65f22d984add 100644 --- a/articles/defender-for-cloud/deploy-vulnerability-assessment-tvm.md +++ b/articles/defender-for-cloud/deploy-vulnerability-assessment-tvm.md @@ -2,9 +2,7 @@ title: Use Microsoft Defender for Endpoint's threat and vulnerability management capabilities with Microsoft Defender for Cloud description: Enable, deploy, and use Microsoft Defender for Endpoint's threat and vulnerability management capabilities with Microsoft Defender for Cloud to discover weaknesses in your Azure and hybrid machines ms.topic: how-to -ms.author: benmansheim -author: bmansheim -ms.date: 03/23/2022 +ms.date: 05/11/2022 --- # Investigate weaknesses with Microsoft Defender for Endpoint's threat and vulnerability management @@ -37,8 +35,6 @@ For a quick overview of threat and vulnerability management, watch this video: |Required roles and permissions:|[Owner](../role-based-access-control/built-in-roles.md#owner) (resource group level) can deploy the scanner
    [Security Reader](../role-based-access-control/built-in-roles.md#security-reader) can view findings| |Clouds:|:::image type="icon" source="./media/icons/yes-icon.png"::: Commercial clouds
:::image type="icon" source="./media/icons/no-icon.png"::: National (Azure Government, Azure China 21Vianet)| - - ## Onboarding your machines to threat and vulnerability management The integration between Microsoft Defender for Endpoint and Microsoft Defender for Cloud takes place in the background, so it doesn't involve any changes at the endpoint level. @@ -54,6 +50,14 @@ The integration between Microsoft Defender for Endpoint and Microsoft Defender f The findings for **all** vulnerability assessment tools are in the Defender for Cloud recommendation **Vulnerabilities in your virtual machines should be remediated**. Learn about how to [view and remediate findings from vulnerability assessment solutions on your VMs](remediate-vulnerability-findings-vm.md) +## Learn more + +If you would like to learn more from the product manager about security posture, check out [Microsoft Defender for Servers](episode-five.md). + +You can also check out the following blogs: + +- [Security posture management and server protection for AWS and GCP are now generally available](https://techcommunity.microsoft.com/t5/microsoft-defender-for-cloud/security-posture-management-and-server-protection-for-aws-and/ba-p/3271388) +- [Microsoft Defender for Cloud Server Monitoring Dashboard](https://techcommunity.microsoft.com/t5/microsoft-defender-for-cloud/microsoft-defender-for-cloud-server-monitoring-dashboard/ba-p/2869658) ## Next steps > [!div class="nextstepaction"] diff --git a/articles/defender-for-cloud/episode-eight.md new file mode 100644 index 000000000000..65d5b10e5a78 --- /dev/null +++ b/articles/defender-for-cloud/episode-eight.md @@ -0,0 +1,49 @@ +--- +title: Microsoft Defender for IoT +description: Learn how Defender for IoT discovers devices to monitor and how it fits in the Microsoft Security portfolio. +ms.topic: reference +ms.date: 05/25/2022 +--- + +# Microsoft Defender for IoT + +**Episode description**: In this episode of Defender for Cloud in the Field, Dolev Zemer joins Yuri Diogenes to talk about how Defender for IoT works. Dolev explains the difference between OT Security and IT Security and how Defender for IoT fills this gap. Dolev also demonstrates how Defender for IoT discovers devices to monitor and how it fits in the Microsoft Security portfolio. + +
    +
    + + +- [1:20](/shows/mdc-in-the-field/defender-for-iot#time=01m20s) - Overview of the Defender for IoT solution + +- [2:15](/shows/mdc-in-the-field/defender-for-iot#time=02m15s) - Difference between OT and IoT + +- [3:30](/shows/mdc-in-the-field/defender-for-iot#time=03m30s) - Prerequisites to use Defender for IoT + +- [4:30](/shows/mdc-in-the-field/defender-for-iot#time=04m30s) - Security posture and threat detection + +- [5:17](/shows/mdc-in-the-field/defender-for-iot#time=05m17s) - Automating alert response + +- [6:15](/shows/mdc-in-the-field/defender-for-iot#time=06m15s) - Integration with Microsoft Sentinel + +- [6:50](/shows/mdc-in-the-field/defender-for-iot#time=06m50s) - Architecture + +- [8:40](/shows/mdc-in-the-field/defender-for-iot#time=08m40s) - Demonstration + +## Recommended resources + +Learn more about [Defender for IoT](../defender-for-iot/index.yml). + +- Subscribe to [Microsoft Security on YouTube](https://www.youtube.com/redirect?event=video_description&redir_token=QUFFLUhqa0ZoTml2Qm9kZ2pjRzNMUXFqVUwyNl80YVNtd3xBQ3Jtc0trVm9QM2Z0NlpOeC1KSUE2UEd1cVJ5aHQ0MTN6WjJEYmNlOG9rWC1KZ1ZqaTNmcHdOOHMtWXRLSGhUTVBhQlhhYzlUc2xmTHZtaUpkd1c4LUQzLWt1YmRTbkVQVE5EcTJIM0Foc042SGdQZU5acVRJbw&q=https%3A%2F%2Faka.ms%2FSubscribeMicrosoftSecurity) + +- Follow us on social media: + [LinkedIn](https://www.youtube.com/redirect?event=video_description&redir_token=QUFFLUhqbFk5TXZuQld2NlpBRV9BQlJqMktYSm95WWhCZ3xBQ3Jtc0tsQU13MkNPWGNFZzVuem5zc05wcnp0VGxybHprVTkwS2todWw0b0VCWUl4a2ZKYVktNGM1TVFHTXpmajVLcjRKX0cwVFNJaDlzTld4MnhyenBuUGRCVmdoYzRZTjFmYXRTVlhpZGc4MHhoa3N6ZDhFMA&q=https%3A%2F%2Fwww.linkedin.com%2Fshowcase%2Fmicrosoft-security%2F) + [Twitter](https://twitter.com/msftsecurity) + +- Join our [Tech Community](https://aka.ms/SecurityTechCommunity) + +- For more about [Microsoft Security](https://msft.it/6002T9HQY) + +## Next steps + +> [!div class="nextstepaction"] +> [Microsoft Defender for Containers in a Multi-Cloud Environment](episode-nine.md) \ No newline at end of file diff --git a/articles/defender-for-cloud/episode-eleven.md b/articles/defender-for-cloud/episode-eleven.md new file mode 100644 index 000000000000..a95c45dc7198 --- /dev/null +++ b/articles/defender-for-cloud/episode-eleven.md @@ -0,0 +1,41 @@ +--- +title: Threat landscape for Defender for Containers +description: Learn about the new detections that are available for different attacks and how Defender for Containers can help to quickly identify malicious activities in containers. +ms.topic: reference +ms.date: 05/25/2022 +--- + +# Threat landscape for Defender for Containers + +**Episode description**: In this episode of Defender for Cloud in the Field, Yossi Weizman joins Yuri Diogenes to talk about the evolution of the threat matrix for Containers and how attacks against Kubernetes have evolved. Yossi also demonstrates new detections that are available for different attacks and how Defender for Containers can help to quickly identify malicious activities in containers. + +
    +
    + + +- [01:15](/shows/mdc-in-the-field/threat-landscape-containers#time=01m15s) - The evolution of attacks against Kubernetes + +- [02:50](/shows/mdc-in-the-field/threat-landscape-containers#time=02m50s) - Identity related attacks against Kubernetes + +- [04:00](/shows/mdc-in-the-field/threat-landscape-containers#time=04m00s) - Threat detection beyond audit logs + +- [05:48](/shows/mdc-in-the-field/threat-landscape-containers#time=5m48s) - Demonstration + +## Recommended resources + +Learn how to [detect identity attacks in Kubernetes](https://techcommunity.microsoft.com/t5/microsoft-defender-for-cloud/detecting-identity-attacks-in-kubernetes/ba-p/3232340). + +- Subscribe to [Microsoft Security on YouTube](https://www.youtube.com/redirect?event=video_description&redir_token=QUFFLUhqa0ZoTml2Qm9kZ2pjRzNMUXFqVUwyNl80YVNtd3xBQ3Jtc0trVm9QM2Z0NlpOeC1KSUE2UEd1cVJ5aHQ0MTN6WjJEYmNlOG9rWC1KZ1ZqaTNmcHdOOHMtWXRLSGhUTVBhQlhhYzlUc2xmTHZtaUpkd1c4LUQzLWt1YmRTbkVQVE5EcTJIM0Foc042SGdQZU5acVRJbw&q=https%3A%2F%2Faka.ms%2FSubscribeMicrosoftSecurity) + +- Follow us on social media: + [LinkedIn](https://www.youtube.com/redirect?event=video_description&redir_token=QUFFLUhqbFk5TXZuQld2NlpBRV9BQlJqMktYSm95WWhCZ3xBQ3Jtc0tsQU13MkNPWGNFZzVuem5zc05wcnp0VGxybHprVTkwS2todWw0b0VCWUl4a2ZKYVktNGM1TVFHTXpmajVLcjRKX0cwVFNJaDlzTld4MnhyenBuUGRCVmdoYzRZTjFmYXRTVlhpZGc4MHhoa3N6ZDhFMA&q=https%3A%2F%2Fwww.linkedin.com%2Fshowcase%2Fmicrosoft-security%2F) + [Twitter](https://twitter.com/msftsecurity) + +- Join our [Tech Community](https://aka.ms/SecurityTechCommunity) + +- For more about [Microsoft Security](https://msft.it/6002T9HQY) + +## Next steps + +> [!div class="nextstepaction"] +> [Enhanced workload protection features in Defender for Servers](episode-twelve.md) diff --git a/articles/defender-for-cloud/episode-five.md b/articles/defender-for-cloud/episode-five.md new file mode 100644 index 000000000000..86f35a328c8f --- /dev/null +++ b/articles/defender-for-cloud/episode-five.md @@ -0,0 +1,47 @@ +--- +title: Microsoft Defender for Servers +description: Learn all about Microsoft Defender for Servers from the product manager. +ms.topic: reference +ms.date: 05/25/2022 +--- + +# Microsoft Defender for Servers + +**Episode description**: In this episode of Defender for Cloud in the field, Aviv Mor joins Yuri Diogenes to talk about Microsoft Defender for Servers updates, including the new integration with TVM. Aviv explains how this new integration with TVM works, the advantages of this integration, which includes software inventory and easy experience to onboard. Aviv also covers the integration with MDE for Linux and the Defender for Servers support for the new multi-cloud connector for AWS. + +
    +
    + + +- [1:22](/shows/mdc-in-the-field/defender-for-containers#time=01m22s) - Overview of the announcements for Microsoft Defender for Servers + +- [5:50](/shows/mdc-in-the-field/defender-for-containers#time=05m50s) - Migration path from Qualys VA to TVM + +- [7:12](/shows/mdc-in-the-field/defender-for-containers#time=07m12s) - TVM capabilities in Defender for Servers + +- [8:38](/shows/mdc-in-the-field/defender-for-containers#time=08m38s) - Threat detections for Defender for Servers + +- [9:52](/shows/mdc-in-the-field/defender-for-containers#time=09m52s) - Defender for Servers in AWS + +- [12:23](/shows/mdc-in-the-field/defender-for-containers#time=12m23s) - Onboard process for TVM in an on-premises scenario + +- [13:20](/shows/mdc-in-the-field/defender-for-containers#time=13m20s) - Demonstration + +## Recommended resources + +Learn how to [Investigate weaknesses with Microsoft Defender for Endpoint's threat and vulnerability management](deploy-vulnerability-assessment-tvm.md). + +- Subscribe to [Microsoft Security on YouTube](https://www.youtube.com/redirect?event=video_description&redir_token=QUFFLUhqa0ZoTml2Qm9kZ2pjRzNMUXFqVUwyNl80YVNtd3xBQ3Jtc0trVm9QM2Z0NlpOeC1KSUE2UEd1cVJ5aHQ0MTN6WjJEYmNlOG9rWC1KZ1ZqaTNmcHdOOHMtWXRLSGhUTVBhQlhhYzlUc2xmTHZtaUpkd1c4LUQzLWt1YmRTbkVQVE5EcTJIM0Foc042SGdQZU5acVRJbw&q=https%3A%2F%2Faka.ms%2FSubscribeMicrosoftSecurity) + +- Follow us on social media: + [LinkedIn](https://www.youtube.com/redirect?event=video_description&redir_token=QUFFLUhqbFk5TXZuQld2NlpBRV9BQlJqMktYSm95WWhCZ3xBQ3Jtc0tsQU13MkNPWGNFZzVuem5zc05wcnp0VGxybHprVTkwS2todWw0b0VCWUl4a2ZKYVktNGM1TVFHTXpmajVLcjRKX0cwVFNJaDlzTld4MnhyenBuUGRCVmdoYzRZTjFmYXRTVlhpZGc4MHhoa3N6ZDhFMA&q=https%3A%2F%2Fwww.linkedin.com%2Fshowcase%2Fmicrosoft-security%2F) + [Twitter](https://twitter.com/msftsecurity) + +- Join our [Tech Community](https://aka.ms/SecurityTechCommunity) + +- For more about [Microsoft Security](https://msft.it/6002T9HQY) + +## Next steps + +> [!div class="nextstepaction"] +> [Lessons Learned from the Field](episode-six.md) diff --git a/articles/defender-for-cloud/episode-four.md b/articles/defender-for-cloud/episode-four.md new file mode 100644 index 000000000000..f8850e1f96df --- /dev/null +++ b/articles/defender-for-cloud/episode-four.md @@ -0,0 +1,43 @@ +--- +title: Security posture management improvements in Microsoft Defender for Cloud +description: Learn how to manage your security posture with Microsoft Defender for Cloud. +ms.topic: reference +ms.date: 05/25/2022 +--- + +# Security posture management improvements in Microsoft Defender for Cloud + +**Episode description**: In this episode of Defender for Cloud in the field, Lior Arviv joins Yuri Diogenes to talk about the cloud security posture management improvements in Microsoft Defender for Cloud. Lior explains the MITRE ATT&CK Framework integration with recommendations, the overall improvements of recommendations and the other fields added in the API. Lior also demonstrates the different ways to access the MITRE ATT&CK integration via filters and recommendations. + +
    +
    + + +- [1:24](/shows/mdc-in-the-field/defender-for-containers#time=01m24s) - Security recommendation refresh time changes + +- [3:50](/shows/mdc-in-the-field/defender-for-containers#time=03m50s) - MITRE ATT&CK Framework mapping to recommendations + +- [6:14](/shows/mdc-in-the-field/defender-for-containers#time=06m14s) - Demonstration + +- [14:44](/shows/mdc-in-the-field/defender-for-containers#time=14m44s) - Secure Score API updates + +- [18:54](/shows/mdc-in-the-field/defender-for-containers#time=18m54s) - What's coming next + +## Recommended resources + +Learn how to [Review your security recommendations](review-security-recommendations.md). + +- Subscribe to [Microsoft Security on YouTube](https://www.youtube.com/redirect?event=video_description&redir_token=QUFFLUhqa0ZoTml2Qm9kZ2pjRzNMUXFqVUwyNl80YVNtd3xBQ3Jtc0trVm9QM2Z0NlpOeC1KSUE2UEd1cVJ5aHQ0MTN6WjJEYmNlOG9rWC1KZ1ZqaTNmcHdOOHMtWXRLSGhUTVBhQlhhYzlUc2xmTHZtaUpkd1c4LUQzLWt1YmRTbkVQVE5EcTJIM0Foc042SGdQZU5acVRJbw&q=https%3A%2F%2Faka.ms%2FSubscribeMicrosoftSecurity) + +- Follow us on social media: + [LinkedIn](https://www.youtube.com/redirect?event=video_description&redir_token=QUFFLUhqbFk5TXZuQld2NlpBRV9BQlJqMktYSm95WWhCZ3xBQ3Jtc0tsQU13MkNPWGNFZzVuem5zc05wcnp0VGxybHprVTkwS2todWw0b0VCWUl4a2ZKYVktNGM1TVFHTXpmajVLcjRKX0cwVFNJaDlzTld4MnhyenBuUGRCVmdoYzRZTjFmYXRTVlhpZGc4MHhoa3N6ZDhFMA&q=https%3A%2F%2Fwww.linkedin.com%2Fshowcase%2Fmicrosoft-security%2F) + [Twitter](https://twitter.com/msftsecurity) + +- Join our [Tech Community](https://aka.ms/SecurityTechCommunity) + +- For more about [Microsoft Security](https://msft.it/6002T9HQY) + +## Next steps + +> [!div class="nextstepaction"] +> [Microsoft Defender for Servers](episode-five.md) diff --git a/articles/defender-for-cloud/episode-nine.md b/articles/defender-for-cloud/episode-nine.md new file mode 100644 index 000000000000..edd39666bc8b --- /dev/null +++ b/articles/defender-for-cloud/episode-nine.md @@ -0,0 +1,43 @@ +--- +title: Microsoft Defender for Containers in a multi-cloud environment +description: Learn about Microsoft Defender for Containers implementation in AWS and GCP. +ms.topic: reference +ms.date: 05/25/2022 +--- + +# Microsoft Defender for Containers in a Multi-Cloud Environment + +**Episode description**: In this episode of Defender for Cloud in the field, Maya Herskovic joins Yuri Diogenes to talk about Microsoft Defender for Containers implementation in AWS and GCP. + +Maya explains about the new workload protection capabilities related to Containers when they're deployed in a multi-cloud environment. Maya also demonstrates the onboarding experience in GCP and how to visualize security recommendations across AWS, GCP, and Azure in a single dashboard. + +
    +
    + + +- [01:12](/shows/mdc-in-the-field/containers-multi-cloud#time=01m12s) - Container protection in a multi-cloud environment + +- [05:03](/shows/mdc-in-the-field/containers-multi-cloud#time=05m03s) - Workload protection capabilities for GCP + +- [06:18](/shows/mdc-in-the-field/containers-multi-cloud#time=06m18s) - Single dashboard for multi-cloud + +- [10:25](/shows/mdc-in-the-field/containers-multi-cloud#time=10m25s) - Demonstration + +## Recommended resources + +Learn how to [Enable Microsoft Defender for Containers](defender-for-containers-enable.md). + +- Subscribe to [Microsoft Security on YouTube](https://www.youtube.com/redirect?event=video_description&redir_token=QUFFLUhqa0ZoTml2Qm9kZ2pjRzNMUXFqVUwyNl80YVNtd3xBQ3Jtc0trVm9QM2Z0NlpOeC1KSUE2UEd1cVJ5aHQ0MTN6WjJEYmNlOG9rWC1KZ1ZqaTNmcHdOOHMtWXRLSGhUTVBhQlhhYzlUc2xmTHZtaUpkd1c4LUQzLWt1YmRTbkVQVE5EcTJIM0Foc042SGdQZU5acVRJbw&q=https%3A%2F%2Faka.ms%2FSubscribeMicrosoftSecurity) + +- Follow us on social media: + [LinkedIn](https://www.youtube.com/redirect?event=video_description&redir_token=QUFFLUhqbFk5TXZuQld2NlpBRV9BQlJqMktYSm95WWhCZ3xBQ3Jtc0tsQU13MkNPWGNFZzVuem5zc05wcnp0VGxybHprVTkwS2todWw0b0VCWUl4a2ZKYVktNGM1TVFHTXpmajVLcjRKX0cwVFNJaDlzTld4MnhyenBuUGRCVmdoYzRZTjFmYXRTVlhpZGc4MHhoa3N6ZDhFMA&q=https%3A%2F%2Fwww.linkedin.com%2Fshowcase%2Fmicrosoft-security%2F) + [Twitter](https://twitter.com/msftsecurity) + +- Join our [Tech Community](https://aka.ms/SecurityTechCommunity) + +- For more about [Microsoft Security](https://msft.it/6002T9HQY) + +## Next steps + +> [!div class="nextstepaction"] +> [Protecting Containers in GCP with Defender for Containers](episode-ten.md) diff --git a/articles/defender-for-cloud/episode-one.md b/articles/defender-for-cloud/episode-one.md new file mode 100644 index 000000000000..f0e3673e9d56 --- /dev/null +++ b/articles/defender-for-cloud/episode-one.md @@ -0,0 +1,47 @@ +--- +title: New AWS connector in Microsoft Defender for Cloud +description: Learn all about the new AWS connector in Microsoft Defender for Cloud. +ms.topic: reference +ms.date: 05/25/2022 +--- + +# New AWS connector in Microsoft Defender for Cloud + +**Episode description**: In this episode of Defender for Cloud in the field, Or Serok joins Yuri Diogenes to share the new AWS connector in Microsoft Defender for Cloud, which was released at Ignite 2021. Or explains the use case scenarios for the new connector and how the new connector work. She demonstrates the onboarding process to connect AWS with Microsoft Defender for Cloud and talks about the centralized management of all security recommendations. + +
    +
    + + +- [00:00](/shows/mdc-in-the-field/aws-connector) - Introduction + +- [2:20](/shows/mdc-in-the-field/aws-connector) - Understanding the new AWS connector. + +- [3:45](/shows/mdc-in-the-field/aws-connector) - Overview of the new onboarding experience. + +- [4:30](/shows/mdc-in-the-field/aws-connector) - Customizing recommendations for AWS workloads. + +- [7:03](/shows/mdc-in-the-field/aws-connector) - Beyond CSPM capabilities. + +- [11:14](/shows/mdc-in-the-field/aws-connector) - Demonstration of the recommendations and onboarding process. + +- [23:20](/shows/mdc-in-the-field/aws-connector) - Demonstration of how to customize AWS assessments. + +## Recommended resources + +Learn more about the new [AWS connector](quickstart-onboard-aws.md) + +- Subscribe to [Microsoft Security on YouTube](https://www.youtube.com/redirect?event=video_description&redir_token=QUFFLUhqa0ZoTml2Qm9kZ2pjRzNMUXFqVUwyNl80YVNtd3xBQ3Jtc0trVm9QM2Z0NlpOeC1KSUE2UEd1cVJ5aHQ0MTN6WjJEYmNlOG9rWC1KZ1ZqaTNmcHdOOHMtWXRLSGhUTVBhQlhhYzlUc2xmTHZtaUpkd1c4LUQzLWt1YmRTbkVQVE5EcTJIM0Foc042SGdQZU5acVRJbw&q=https%3A%2F%2Faka.ms%2FSubscribeMicrosoftSecurity) + +- Follow us on social media: + [LinkedIn](https://www.youtube.com/redirect?event=video_description&redir_token=QUFFLUhqbFk5TXZuQld2NlpBRV9BQlJqMktYSm95WWhCZ3xBQ3Jtc0tsQU13MkNPWGNFZzVuem5zc05wcnp0VGxybHprVTkwS2todWw0b0VCWUl4a2ZKYVktNGM1TVFHTXpmajVLcjRKX0cwVFNJaDlzTld4MnhyenBuUGRCVmdoYzRZTjFmYXRTVlhpZGc4MHhoa3N6ZDhFMA&q=https%3A%2F%2Fwww.linkedin.com%2Fshowcase%2Fmicrosoft-security%2F) + [Twitter](https://twitter.com/msftsecurity) + +- Join our [Tech Community](https://aka.ms/SecurityTechCommunity) + +- For more about [Microsoft Security](https://msft.it/6002T9HQY) + +## Next steps + +> [!div class="nextstepaction"] +> [Integrate Azure Purview with Microsoft Defender for Cloud](episode-two.md) diff --git a/articles/defender-for-cloud/episode-seven.md b/articles/defender-for-cloud/episode-seven.md new file mode 100644 index 000000000000..b0fda391035d --- /dev/null +++ b/articles/defender-for-cloud/episode-seven.md @@ -0,0 +1,47 @@ +--- +title: New GCP connector in Microsoft Defender for Cloud +description: Learn all about the new GCP connector in Microsoft Defender for Cloud. +ms.topic: reference +ms.date: 05/25/2022 +--- + +# New GCP connector in Microsoft Defender for Cloud + +**Episode description**: In this episode of Defender for Cloud in the field, Or Serok joins Yuri Diogenes to share the new GCP Connector in Microsoft Defender for Cloud. Or explains the use case scenarios for the new connector and how the new connector works. She demonstrates the onboarding process to connect GCP with Microsoft Defender for Cloud and talks about custom assessment and the CSPM experience for multi-cloud. + +
    +
    + + +- [1:23](/shows/mdc-in-the-field/gcp-connector#time=01m23s) - Overview of the new GCP connector + +- [4:05](/shows/mdc-in-the-field/gcp-connector#time=04m05s) - Migration path from the old GCP connector to the new one + +- [5:10](/shows/mdc-in-the-field/gcp-connector#time=05m10s) - Type of assessment utilized by the new GCP connector + +- [5:51](/shows/mdc-in-the-field/gcp-connector#time=05m51s) - Custom assessments + +- [6:52](/shows/mdc-in-the-field/gcp-connector#time=06m52s) - Demonstration + +- [15:05](/shows/mdc-in-the-field/gcp-connector#time=15m05s) - Recommendation experience + +- [18:00](/shows/mdc-in-the-field/gcp-connector#time=18m00s) - Final considerations + +## Recommended resources + +Learn more how to [Connect your GCP projects to Microsoft Defender for Cloud](quickstart-onboard-gcp.md). + +- Subscribe to [Microsoft Security on YouTube](https://www.youtube.com/redirect?event=video_description&redir_token=QUFFLUhqa0ZoTml2Qm9kZ2pjRzNMUXFqVUwyNl80YVNtd3xBQ3Jtc0trVm9QM2Z0NlpOeC1KSUE2UEd1cVJ5aHQ0MTN6WjJEYmNlOG9rWC1KZ1ZqaTNmcHdOOHMtWXRLSGhUTVBhQlhhYzlUc2xmTHZtaUpkd1c4LUQzLWt1YmRTbkVQVE5EcTJIM0Foc042SGdQZU5acVRJbw&q=https%3A%2F%2Faka.ms%2FSubscribeMicrosoftSecurity) + +- Follow us on social media: + [LinkedIn](https://www.youtube.com/redirect?event=video_description&redir_token=QUFFLUhqbFk5TXZuQld2NlpBRV9BQlJqMktYSm95WWhCZ3xBQ3Jtc0tsQU13MkNPWGNFZzVuem5zc05wcnp0VGxybHprVTkwS2todWw0b0VCWUl4a2ZKYVktNGM1TVFHTXpmajVLcjRKX0cwVFNJaDlzTld4MnhyenBuUGRCVmdoYzRZTjFmYXRTVlhpZGc4MHhoa3N6ZDhFMA&q=https%3A%2F%2Fwww.linkedin.com%2Fshowcase%2Fmicrosoft-security%2F) + [Twitter](https://twitter.com/msftsecurity) + +- Join our [Tech Community](https://aka.ms/SecurityTechCommunity) + +- For more about [Microsoft Security](https://msft.it/6002T9HQY) + +## Next steps + +> [!div class="nextstepaction"] +> [Microsoft Defender for IoT](episode-eight.md) diff --git a/articles/defender-for-cloud/episode-six.md b/articles/defender-for-cloud/episode-six.md new file mode 100644 index 000000000000..b624c486560c --- /dev/null +++ b/articles/defender-for-cloud/episode-six.md @@ -0,0 +1,45 @@ +--- +title: Lessons learned from the field with Microsoft Defender for Cloud +description: Learn how Microsoft Defender for Cloud is used to fill the gap between cloud security posture management and cloud workload protection. +ms.topic: reference +ms.date: 05/25/2022 +--- + +# Lessons learned from the field with Microsoft Defender for Cloud + +**Episode description**: In this episode Carlos Faria, Microsoft Cybersecurity Consultant joins Yuri to talk about lessons from the field and how customers are using Microsoft Defender for Cloud to improve their security posture and protect their workloads in a multi-cloud environment. + +Carlos also covers how Microsoft Defender for Cloud is used to fill the gap between cloud security posture management and cloud workload protection, and demonstrates some features related to this scenario. + +
    +
    + + +- [1:30](/shows/mdc-in-the-field/lessons-from-the-field#time=01m30s) - Why Microsoft Defender for Cloud is a unique solution when compared with other competitors? + +- [2:58](/shows/mdc-in-the-field/lessons-from-the-field#time=02m58s) - How to fulfill the gap between CSPM and CWPP + +- [4:42](/shows/mdc-in-the-field/lessons-from-the-field#time=04m42s) - How a multi-cloud affects the CSPM lifecycle and how Defender for Cloud fits in? + +- [8:05](/shows/mdc-in-the-field/lessons-from-the-field#time=08m05s) - Demonstration + +- [12:34](/shows/mdc-in-the-field/lessons-from-the-field#time=12m34s) - Final considerations + +## Recommended resources + +Learn more [What is Microsoft Defender for Cloud?](defender-for-cloud-introduction.md). + +- Subscribe to [Microsoft Security on YouTube](https://www.youtube.com/redirect?event=video_description&redir_token=QUFFLUhqa0ZoTml2Qm9kZ2pjRzNMUXFqVUwyNl80YVNtd3xBQ3Jtc0trVm9QM2Z0NlpOeC1KSUE2UEd1cVJ5aHQ0MTN6WjJEYmNlOG9rWC1KZ1ZqaTNmcHdOOHMtWXRLSGhUTVBhQlhhYzlUc2xmTHZtaUpkd1c4LUQzLWt1YmRTbkVQVE5EcTJIM0Foc042SGdQZU5acVRJbw&q=https%3A%2F%2Faka.ms%2FSubscribeMicrosoftSecurity) + +- Follow us on social media: + [LinkedIn](https://www.youtube.com/redirect?event=video_description&redir_token=QUFFLUhqbFk5TXZuQld2NlpBRV9BQlJqMktYSm95WWhCZ3xBQ3Jtc0tsQU13MkNPWGNFZzVuem5zc05wcnp0VGxybHprVTkwS2todWw0b0VCWUl4a2ZKYVktNGM1TVFHTXpmajVLcjRKX0cwVFNJaDlzTld4MnhyenBuUGRCVmdoYzRZTjFmYXRTVlhpZGc4MHhoa3N6ZDhFMA&q=https%3A%2F%2Fwww.linkedin.com%2Fshowcase%2Fmicrosoft-security%2F) + [Twitter](https://twitter.com/msftsecurity) + +- Join our [Tech Community](https://aka.ms/SecurityTechCommunity) + +- For more about [Microsoft Security](https://msft.it/6002T9HQY) + +## Next steps + +> [!div class="nextstepaction"] +> [New GCP Connector in Microsoft Defender for Cloud](episode-seven.md) \ No newline at end of file diff --git a/articles/defender-for-cloud/episode-ten.md b/articles/defender-for-cloud/episode-ten.md new file mode 100644 index 000000000000..c3d232b11329 --- /dev/null +++ b/articles/defender-for-cloud/episode-ten.md @@ -0,0 +1,43 @@ +--- +title: Protecting containers in GCP with Defender for Containers +description: Learn how to use Defender for Containers, to protect Containers that are located in Google Cloud Projects. +ms.topic: reference +ms.date: 05/25/2022 +--- + +# Protecting containers in GCP with Defender for Containers + +**Episode description**: In this episode of Defender for Cloud in the field, Nadav Wolfin joins Yuri Diogenes to talk about how to use Defender for Containers to protect Containers that are located at Google Cloud (GCP). + +Nadav gives insights about workload protection for GKE and how to obtain visibility of this type of workload across Azure and AWS. Nadav also demonstrates the overall onboarding experience and provides an overview of the architecture of this solution. + +
    +
    + + +- [00:55](/shows/mdc-in-the-field/gcp-containers#time=00m55s) - Architecture solution for Defender for Containers and support for GKE + +- [06:42](/shows/mdc-in-the-field/gcp-containers#time=06m42s) - How the onboard process works + +- [08:46](/shows/mdc-in-the-field/gcp-containers#time=08m46s) - Demonstration + +- [26:18](/shows/mdc-in-the-field/gcp-containers#time=26m18s) - Integration with Azure Arc + +## Recommended resources + +Learn how to [Enable Microsoft Defender for Containers](defender-for-containers-enable.md). + +- Subscribe to [Microsoft Security on YouTube](https://www.youtube.com/redirect?event=video_description&redir_token=QUFFLUhqa0ZoTml2Qm9kZ2pjRzNMUXFqVUwyNl80YVNtd3xBQ3Jtc0trVm9QM2Z0NlpOeC1KSUE2UEd1cVJ5aHQ0MTN6WjJEYmNlOG9rWC1KZ1ZqaTNmcHdOOHMtWXRLSGhUTVBhQlhhYzlUc2xmTHZtaUpkd1c4LUQzLWt1YmRTbkVQVE5EcTJIM0Foc042SGdQZU5acVRJbw&q=https%3A%2F%2Faka.ms%2FSubscribeMicrosoftSecurity) + +- Follow us on social media: + [LinkedIn](https://www.youtube.com/redirect?event=video_description&redir_token=QUFFLUhqbFk5TXZuQld2NlpBRV9BQlJqMktYSm95WWhCZ3xBQ3Jtc0tsQU13MkNPWGNFZzVuem5zc05wcnp0VGxybHprVTkwS2todWw0b0VCWUl4a2ZKYVktNGM1TVFHTXpmajVLcjRKX0cwVFNJaDlzTld4MnhyenBuUGRCVmdoYzRZTjFmYXRTVlhpZGc4MHhoa3N6ZDhFMA&q=https%3A%2F%2Fwww.linkedin.com%2Fshowcase%2Fmicrosoft-security%2F) + [Twitter](https://twitter.com/msftsecurity) + +- Join our [Tech Community](https://aka.ms/SecurityTechCommunity) + +- For more about [Microsoft Security](https://msft.it/6002T9HQY) + +## Next steps + +> [!div class="nextstepaction"] +> [Threat landscape for Containers](episode-eleven.md) diff --git a/articles/defender-for-cloud/episode-three.md b/articles/defender-for-cloud/episode-three.md new file mode 100644 index 000000000000..d13c1e568fa5 --- /dev/null +++ b/articles/defender-for-cloud/episode-three.md @@ -0,0 +1,47 @@ +--- +title: Microsoft Defender for Containers +description: Learn how about Microsoft Defender for Containers. +ms.topic: reference +ms.date: 05/25/2022 +--- + +# Microsoft Defender for Containers + +**Episode description**: In this episode of Defender for Cloud in the field, Maya Herskovic joins Yuri Diogenes to talk about Microsoft Defender for Containers. Maya explains what's new in Microsoft Defender for Containers, the new capabilities that are available, the new pricing model, and the multi-cloud coverage. Maya also demonstrates the overall experience of Microsoft Defender for Containers from the recommendations to the alerts that you may receive. + +
    +
    + + +- [1:09](/shows/mdc-in-the-field/defender-for-containers#time=01m09s) - What's new in the Defender for Containers plan? + +- [4:42](/shows/mdc-in-the-field/defender-for-containers#time=04m42s) - Change in the host level protection + +- [8:08](/shows/mdc-in-the-field/defender-for-containers#time=08m08s) - How to migrate to the new plan? + +- [9:28](/shows/mdc-in-the-field/defender-for-containers#time=09m28s) - Onboarding requirements + +- [11:45](/shows/mdc-in-the-field/defender-for-containers#time=11m45s) - Improvements in the anomaly detection + +- [13:27](/shows/mdc-in-the-field/defender-for-containers#time=13m27s) - Demonstration + +- [22:17](/shows/mdc-in-the-field/defender-for-containers#time=22m17s) - Final considerations + +## Recommended resources + +Learn more about [Microsoft Defender for Containers](defender-for-containers-introduction.md). + +- Subscribe to [Microsoft Security on YouTube](https://www.youtube.com/redirect?event=video_description&redir_token=QUFFLUhqa0ZoTml2Qm9kZ2pjRzNMUXFqVUwyNl80YVNtd3xBQ3Jtc0trVm9QM2Z0NlpOeC1KSUE2UEd1cVJ5aHQ0MTN6WjJEYmNlOG9rWC1KZ1ZqaTNmcHdOOHMtWXRLSGhUTVBhQlhhYzlUc2xmTHZtaUpkd1c4LUQzLWt1YmRTbkVQVE5EcTJIM0Foc042SGdQZU5acVRJbw&q=https%3A%2F%2Faka.ms%2FSubscribeMicrosoftSecurity) + +- Follow us on social media: + [LinkedIn](https://www.youtube.com/redirect?event=video_description&redir_token=QUFFLUhqbFk5TXZuQld2NlpBRV9BQlJqMktYSm95WWhCZ3xBQ3Jtc0tsQU13MkNPWGNFZzVuem5zc05wcnp0VGxybHprVTkwS2todWw0b0VCWUl4a2ZKYVktNGM1TVFHTXpmajVLcjRKX0cwVFNJaDlzTld4MnhyenBuUGRCVmdoYzRZTjFmYXRTVlhpZGc4MHhoa3N6ZDhFMA&q=https%3A%2F%2Fwww.linkedin.com%2Fshowcase%2Fmicrosoft-security%2F) + [Twitter](https://twitter.com/msftsecurity) + +- Join our [Tech Community](https://aka.ms/SecurityTechCommunity) + +- For more about [Microsoft Security](https://msft.it/6002T9HQY) + +## Next steps + +> [!div class="nextstepaction"] +> [Security posture management improvements](episode-four.md) \ No newline at end of file diff --git a/articles/defender-for-cloud/episode-twelve.md b/articles/defender-for-cloud/episode-twelve.md new file mode 100644 index 000000000000..22749ebcfba0 --- /dev/null +++ b/articles/defender-for-cloud/episode-twelve.md @@ -0,0 +1,47 @@ +--- +title: Enhanced workload protection features in Defender for Servers +description: Learn about the enhanced capabilities available in Defender for Servers, for VMs that are located in GCP, AWS and on-premises. +ms.topic: reference +ms.date: 05/25/2022 +--- + +# Enhanced workload protection features in Defender for Servers + +**Episode description**: In this episode of Defender for Cloud in the Field, Netta Norman joins Yuri Diogenes to talk about the enhanced capabilities available in Defender for Servers, for VMs that are located in GCP, AWS and on-premises. + +Netta explains how Defender for Servers applies Azure Arc as a bridge to onboard non-Azure VMs as she demonstrates what the experience looks like. + +
    +
    + + +- [00:55](/shows/mdc-in-the-field/enhanced-workload-protection#time=00m55s) - Arc Auto-provisioning in GCP + +- [2:57](/shows/mdc-in-the-field/enhanced-workload-protection#time=02m57s) - Prerequisites to Arc auto-provisioning + +- [3:50](/shows/mdc-in-the-field/enhanced-workload-protection#time=03m50s) - Considerations when enabling Defender for Server plan in GCP + +- [5:20](/shows/mdc-in-the-field/enhanced-workload-protection#time=05m20s) - Dashboard refresh time interval + +- [7:00](/shows/mdc-in-the-field/enhanced-workload-protection#time=07m00s) - Security value for non-Azure workloads + +- [9:06](/shows/mdc-in-the-field/enhanced-workload-protection#time=05m20s) - Demonstration + +## Recommended resources + +Introduce yourself to [Microsoft Defender for Servers](defender-for-servers-introduction.md). + +- Subscribe to [Microsoft Security on YouTube](https://www.youtube.com/redirect?event=video_description&redir_token=QUFFLUhqa0ZoTml2Qm9kZ2pjRzNMUXFqVUwyNl80YVNtd3xBQ3Jtc0trVm9QM2Z0NlpOeC1KSUE2UEd1cVJ5aHQ0MTN6WjJEYmNlOG9rWC1KZ1ZqaTNmcHdOOHMtWXRLSGhUTVBhQlhhYzlUc2xmTHZtaUpkd1c4LUQzLWt1YmRTbkVQVE5EcTJIM0Foc042SGdQZU5acVRJbw&q=https%3A%2F%2Faka.ms%2FSubscribeMicrosoftSecurity) + +- Follow us on social media: + [LinkedIn](https://www.youtube.com/redirect?event=video_description&redir_token=QUFFLUhqbFk5TXZuQld2NlpBRV9BQlJqMktYSm95WWhCZ3xBQ3Jtc0tsQU13MkNPWGNFZzVuem5zc05wcnp0VGxybHprVTkwS2todWw0b0VCWUl4a2ZKYVktNGM1TVFHTXpmajVLcjRKX0cwVFNJaDlzTld4MnhyenBuUGRCVmdoYzRZTjFmYXRTVlhpZGc4MHhoa3N6ZDhFMA&q=https%3A%2F%2Fwww.linkedin.com%2Fshowcase%2Fmicrosoft-security%2F) + [Twitter](https://twitter.com/msftsecurity) + +- Join our [Tech Community](https://aka.ms/SecurityTechCommunity) + +- For more about [Microsoft Security](https://msft.it/6002T9HQY) + +## Next steps + +> [!div class="nextstepaction"] +> [New AWS Connector in Microsoft Defender for Cloud](episode-one.md) diff --git a/articles/defender-for-cloud/episode-two.md b/articles/defender-for-cloud/episode-two.md new file mode 100644 index 000000000000..ed55c44dd961 --- /dev/null +++ b/articles/defender-for-cloud/episode-two.md @@ -0,0 +1,47 @@ +--- +title: Integrate Azure Purview with Microsoft Defender for Cloud +description: Learn how to integrate Azure Purview with Microsoft Defender for Cloud. +ms.topic: reference +ms.date: 05/25/2022 +--- + +# Integrate Azure Purview with Microsoft Defender for Cloud + +**Episode description**: In this episode of Defender for Cloud in the field, David Trigano joins Yuri Diogenes to share the new integration of Microsoft Defender for Cloud with Azure Purview, which was released at Ignite 2021. + +David explains the use case scenarios for this integration and how the data classification is done by Azure Purview can help prioritize recommendations and alerts in Defender for Cloud. David also demonstrates the overall experience of data enrichment based on the information that flows from Azure Purview to Defender for Cloud. + +
    +
    + + +- [1:36](/shows/mdc-in-the-field/integrate-with-purview) - Overview of Azure Purview + +- [2:40](/shows/mdc-in-the-field/integrate-with-purview) - Integration with Microsoft Defender for Cloud + +- [3:48](/shows/mdc-in-the-field/integrate-with-purview) - How the integration with Azure Purview helps to prioritize Recommendations in Microsoft Defender for Cloud + +- [5:26](/shows/mdc-in-the-field/integrate-with-purview) - How the integration with Azure Purview helps to prioritize Alerts in Microsoft Defender for Cloud + +- [8:54](/shows/mdc-in-the-field/integrate-with-purview) - Demonstration + +- [16:50](/shows/mdc-in-the-field/integrate-with-purview) - Final considerations + +## Recommended resources + +Learn more about the [integration with Azure Purview](information-protection.md). + +- Subscribe to [Microsoft Security on YouTube](https://www.youtube.com/redirect?event=video_description&redir_token=QUFFLUhqa0ZoTml2Qm9kZ2pjRzNMUXFqVUwyNl80YVNtd3xBQ3Jtc0trVm9QM2Z0NlpOeC1KSUE2UEd1cVJ5aHQ0MTN6WjJEYmNlOG9rWC1KZ1ZqaTNmcHdOOHMtWXRLSGhUTVBhQlhhYzlUc2xmTHZtaUpkd1c4LUQzLWt1YmRTbkVQVE5EcTJIM0Foc042SGdQZU5acVRJbw&q=https%3A%2F%2Faka.ms%2FSubscribeMicrosoftSecurity) + +- Follow us on social media: + [LinkedIn](https://www.youtube.com/redirect?event=video_description&redir_token=QUFFLUhqbFk5TXZuQld2NlpBRV9BQlJqMktYSm95WWhCZ3xBQ3Jtc0tsQU13MkNPWGNFZzVuem5zc05wcnp0VGxybHprVTkwS2todWw0b0VCWUl4a2ZKYVktNGM1TVFHTXpmajVLcjRKX0cwVFNJaDlzTld4MnhyenBuUGRCVmdoYzRZTjFmYXRTVlhpZGc4MHhoa3N6ZDhFMA&q=https%3A%2F%2Fwww.linkedin.com%2Fshowcase%2Fmicrosoft-security%2F) + [Twitter](https://twitter.com/msftsecurity) + +- Join our [Tech Community](https://aka.ms/SecurityTechCommunity) + +- For more about [Microsoft Security](https://msft.it/6002T9HQY) + +## Next steps + +> [!div class="nextstepaction"] +> [Watch Episode 3](episode-three.md) diff --git a/articles/defender-for-cloud/includes/defender-for-containers-enable-plan-aks.md b/articles/defender-for-cloud/includes/defender-for-containers-enable-plan-aks.md index 00807ea8f11f..6014a019b4bc 100644 --- a/articles/defender-for-cloud/includes/defender-for-containers-enable-plan-aks.md +++ b/articles/defender-for-cloud/includes/defender-for-containers-enable-plan-aks.md @@ -1,9 +1,9 @@ --- -author: ElazarK -ms.author: elkrieger +author: bmansheim +ms.author: benmansheim ms.service: defender-for-cloud ms.topic: include -ms.date: 05/12/2022 +ms.date: 05/26/2022 --- ## Enable the plan @@ -19,16 +19,18 @@ ms.date: 05/12/2022 > > :::image type="content" source="../media/release-notes/defender-plans-deprecated-indicator.png" alt-text="Defender for container registries and Defender for Kubernetes plans showing 'Deprecated' and upgrade information."::: -1. By default, when enabling the plan through the Azure portal, [Microsoft Defender for Containers](../defender-for-containers-introduction.md) is configured to auto provision (automatically install) required components to provide the protections offered by plan, including the assignment of a default workspace. +1. By default, when enabling the plan through the Azure portal, [Microsoft Defender for Containers](../defender-for-containers-introduction.md) is configured to auto provision (automatically install) required components to provide the protections offered by plan, including the assignment of a default workspace. 
- Optionally, you can modify this configuration from the [Defender plans page](https://portal.azure.com/#blade/Microsoft_Azure_Security/SecurityMenuBlade/pricingTier) or from the [Auto provisioning page](https://portal.azure.com/#blade/Microsoft_Azure_Security/SecurityMenuBlade/dataCollection) on the **Microsoft Defender for Containers components (preview)** row: + If you want to disable auto provisioning during the onboading process, select **Edit configuration** for the **Containers** plan. This opens the Advanced options, where you can disable auto provisioning for each component. + + In addition, you can modify this configuration from the [Defender plans page](https://portal.azure.com/#blade/Microsoft_Azure_Security/SecurityMenuBlade/pricingTier) or from the [Auto provisioning page](https://portal.azure.com/#blade/Microsoft_Azure_Security/SecurityMenuBlade/dataCollection) on the **Microsoft Defender for Containers components (preview)** row: :::image type="content" source="../media/defender-for-containers/auto-provisioning-defender-for-containers.png" alt-text="Screenshot of the auto provisioning options for Microsoft Defender for Containers." lightbox="../media/defender-for-containers/auto-provisioning-defender-for-containers.png"::: > [!NOTE] > If you choose to **disable the plan** at any time after enabling it through the portal as shown above, you'll need to manually remove Defender for Containers components deployed on your clusters. - You can [assign a custom workspace](https://docs.microsoft.com/azure/defender-for-cloud/defender-for-containers-enable?tabs=aks-deploy-portal%2Ck8s-deploy-asc%2Ck8s-verify-asc%2Ck8s-remove-arc%2Caks-removeprofile-api&pivots=defender-for-container-aks#assign-a-custom-workspace) through Azure Policy. + You can [assign a custom workspace](../defender-for-containers-enable.md?pivots=defender-for-container-aks&tabs=aks-deploy-portal%2ck8s-deploy-asc%2ck8s-verify-asc%2ck8s-remove-arc%2caks-removeprofile-api#assign-a-custom-workspace) through Azure Policy. 1. If you disable the auto provisioning of any component, you can easily deploy the component to one or more clusters using the appropriate recommendation: @@ -39,13 +41,13 @@ ms.date: 05/12/2022 > [!Note] >Microsoft Defender for Containers is configured to defend all of your clouds automatically. When you install all of the required prerequisites and enable all of the auto provisioning capabilities. > - > If you choose to disable all of the auto provision configuration options, no agents, or components will be deployed to your clusters. Protection will be limited to the Agentless features only. Learn which features are Agentless in the [availability section](../supported-machines-endpoint-solutions-clouds-containers.md) for Defender for Containers. + > If you choose to disable all of the auto provision configuration options, no agents, or components will be deployed to your clusters. Protection will be limited to the Agentless features only. Learn which features are Agentless in the [availability section](../supported-machines-endpoint-solutions-clouds-containers.md) for Defender for Containers. ## Deploy the Defender profile You can enable the Defender for Containers plan and deploy all of the relevant components from the Azure portal, the REST API, or with a Resource Manager template. For detailed steps, select the relevant tab. -Once the Defender profile has been deployed, a default workspace will be automatically assigned. 
You can [assign a custom workspace](https://docs.microsoft.com/azure/defender-for-cloud/defender-for-containers-enable?tabs=aks-deploy-portal%2Ck8s-deploy-asc%2Ck8s-verify-asc%2Ck8s-remove-arc%2Caks-removeprofile-api&pivots=defender-for-container-aks#assign-a-custom-workspace) in place of the default workspace through Azure Policy. +Once the Defender profile has been deployed, a default workspace will be automatically assigned. You can [assign a custom workspace](../defender-for-containers-enable.md?pivots=defender-for-container-aks&tabs=aks-deploy-portal%2ck8s-deploy-asc%2ck8s-verify-asc%2ck8s-remove-arc%2caks-removeprofile-api#assign-a-custom-workspace) in place of the default workspace through Azure Policy. The Defender security profile is a preview feature. [!INCLUDE [Legalese](../../../includes/defender-for-cloud-preview-legal-text.md)] @@ -53,7 +55,7 @@ The Defender security profile is a preview feature. [!INCLUDE [Legalese](../../. ### Use the fix button from the Defender for Cloud recommendation -A streamlined, frictionless, process lets you use the Azure portal pages to enable the Defender for Cloud plan and setup auto provisioning of all the necessary components for defending your Kubernetes clusters at scale. +A streamlined, frictionless, process lets you use the Azure portal pages to enable the Defender for Cloud plan and setup auto provisioning of all the necessary components for defending your Kubernetes clusters at scale. A dedicated Defender for Cloud recommendation provides: @@ -73,7 +75,6 @@ A dedicated Defender for Cloud recommendation provides: 1. Select **Fix *[x]* resources**. - ### [**REST API**](#tab/aks-deploy-rest) ### Use the REST API to deploy the Defender profile @@ -85,9 +86,9 @@ PUT https://management.azure.com/subscriptions/{{Subscription Id}}/resourcegroup ``` Request URI: `https://management.azure.com/subscriptions/{{SubscriptionId}}/resourcegroups/{{ResourceGroup}}/providers/Microsoft.ContainerService/managedClusters/{{ClusterName}}?api-version={{ApiVersion}}` - + Request query parameters: - + | Name | Description | Mandatory | |----------------|------------------------------------|-----------| | SubscriptionId | Cluster's subscription ID | Yes | @@ -95,9 +96,8 @@ Request query parameters: | ClusterName | Cluster's name | Yes | | ApiVersion | API version, must be >= 2021-07-01 | Yes | - Request Body: - + ```rest { "location": "{{Location}}", @@ -111,7 +111,7 @@ Request Body: } } ``` - + Request body parameters: | Name | Description | Mandatory | @@ -120,7 +120,61 @@ Request body parameters: | properties.securityProfile.azureDefender.enabled | Determines whether to enable or disable Microsoft Defender for Containers on the cluster | Yes | | properties.securityProfile.azureDefender.logAnalyticsWorkspaceResourceId | Log Analytics workspace Azure resource ID | Yes | +### [**Azure CLI**](#tab/k8s-deploy-cli) + +### Use Azure CLI to deploy the Defender extension + +1. Log in to Azure: + + ```azurecli + az login + az account set --subscription + ``` + + > [!IMPORTANT] + > Ensure that you use the same subscription ID for ```` as the one associated with your AKS cluster. + +1. Enable the feature flag in the CLI: + + ```azurecli + az feature register --namespace Microsoft.ContainerService --name AKS-AzureDefender + ``` + +1. 
Enable the Defender profile on your containers:
+
+    - Run the following command to create a new cluster with the Defender profile enabled:
+
+        ```azurecli
+        az aks create --enable-defender --resource-group <your-resource-group> --name <your-cluster-name>
+        ```
+
+    - Run the following command to enable the Defender profile on an existing cluster:
+
+        ```azurecli
+        az aks update --enable-defender --resource-group <your-resource-group> --name <your-cluster-name>
+        ```
+
+    A description of all the supported configuration settings on the Defender extension type is given below:
+
+    | Property | Description |
+    |----------|-------------|
+    | logAnalyticsWorkspaceResourceID | **Optional**. Full resource ID of your own Log Analytics workspace.<br>When not provided, the default workspace of the region will be used.<br><br>To get the full resource ID, run the following command to display the list of workspaces in your subscriptions in the default JSON format:<br>```az resource list --resource-type Microsoft.OperationalInsights/workspaces -o json```<br><br>The Log Analytics workspace resource ID has the following syntax:<br>/subscriptions/{your-subscription-id}/resourceGroups/{your-resource-group}/providers/Microsoft.OperationalInsights/workspaces/{your-workspace-name}.<br>
    Learn more in [Log Analytics workspaces](../../azure-monitor/logs/log-analytics-workspace-overview.md) | + + You can include these settings in a JSON file and specify the JSON file in the `az aks create` and `az aks update` commands with this parameter: `--defender-config`. The format of the JSON file must be: + + ```json + {"logAnalyticsWorkspaceResourceID": ""} + ``` + + Learn more about AKS CLI commands in [az aks](/cli/azure/aks). + +1. To verify that the profile was successfully added, run the following command on your machine with the `kubeconfig` file pointed to your cluster: + + ```console + kubectl get pods -n azuredefender + ``` + When the profile is added, you should see a pod called `azuredefender-XXXXX` in `Running` state. It might take a few minutes for pods to be added. ### [**Resource Manager**](#tab/aks-deploy-arm) diff --git a/articles/defender-for-cloud/includes/defender-for-containers-enable-plan-arc.md b/articles/defender-for-cloud/includes/defender-for-containers-enable-plan-arc.md index 8b7be0f31e29..8442fcbb74d2 100644 --- a/articles/defender-for-cloud/includes/defender-for-containers-enable-plan-arc.md +++ b/articles/defender-for-cloud/includes/defender-for-containers-enable-plan-arc.md @@ -12,7 +12,7 @@ ms.date: 05/12/2022 1. From Defender for Cloud's menu, open the [Environment settings page](https://portal.azure.com/#blade/Microsoft_Azure_Security/SecurityMenuBlade/EnvironmentSettings) and select the relevant subscription. -1. In the [Defender plans page](https://portal.azure.com/#blade/Microsoft_Azure_Security/SecurityMenuBlade/pricingTier), enable **Defender for Containers** +1. In the [Defender plans page](https://portal.azure.com/#blade/Microsoft_Azure_Security/SecurityMenuBlade/pricingTier), enable **Defender for Containers**. > [!TIP] > If the subscription already has Defender for Kubernetes and/or Defender for container registries enabled, an update notice is shown. Otherwise, the only option will be **Defender for Containers**. @@ -21,14 +21,16 @@ ms.date: 05/12/2022 1. By default, when enabling the plan through the Azure portal, [Microsoft Defender for Containers](../defender-for-containers-introduction.md) is configured to auto provision (automatically install) required components to provide the protections offered by plan, including the assignment of a default workspace. - Optionally, you can modify this configuration from the [Defender plans page](https://portal.azure.com/#blade/Microsoft_Azure_Security/SecurityMenuBlade/pricingTier) or from the [Auto provisioning page](https://portal.azure.com/#blade/Microsoft_Azure_Security/SecurityMenuBlade/dataCollection) on the **Microsoft Defender for Containers components (preview)** row: + If you want to disable auto provisioning during the onboading process, select **Edit configuration** for the **Containers** plan. This opens the Advanced options, where you can disable auto provisioning for each component. + + In addition, you can modify this configuration from the [Defender plans page](https://portal.azure.com/#blade/Microsoft_Azure_Security/SecurityMenuBlade/pricingTier) or from the [Auto provisioning page](https://portal.azure.com/#blade/Microsoft_Azure_Security/SecurityMenuBlade/dataCollection) on the **Microsoft Defender for Containers components (preview)** row: :::image type="content" source="../media/defender-for-containers/auto-provisioning-defender-for-containers.png" alt-text="Screenshot of the auto provisioning options for Microsoft Defender for Containers." 
lightbox="../media/defender-for-containers/auto-provisioning-defender-for-containers.png"::: > [!NOTE] > If you choose to **disable the plan** at any time after enabling it through the portal as shown above, you'll need to manually remove Defender for Containers components deployed on your clusters. - You can [assign a custom workspace](https://docs.microsoft.com/azure/defender-for-cloud/defender-for-containers-enable?tabs=aks-deploy-portal%2Ck8s-deploy-asc%2Ck8s-verify-asc%2Ck8s-remove-arc%2Caks-removeprofile-api&pivots=defender-for-container-arc#assign-a-custom-workspace) through Azure Policy. + You can [assign a custom workspace](../defender-for-containers-enable.md?tabs=aks-deploy-portal%2Ck8s-deploy-asc%2Ck8s-verify-asc%2Ck8s-remove-arc%2Caks-removeprofile-api&pivots=defender-for-container-arc#assign-a-custom-workspace) through Azure Policy. 1. If you disable the auto provisioning of any component, you can easily deploy the component to one or more clusters using the appropriate recommendation: @@ -39,6 +41,7 @@ ms.date: 05/12/2022 ## Prerequisites Before deploying the extension, ensure you: + - [Connect the Kubernetes cluster to Azure Arc](../../azure-arc/kubernetes/quickstart-connect-cluster.md) - Complete the [pre-requisites listed under the generic cluster extensions documentation](../../azure-arc/kubernetes/extensions.md#prerequisites). @@ -72,7 +75,6 @@ A dedicated Defender for Cloud recommendation provides: :::image type="content" source="../media/defender-for-kubernetes-azure-arc/security-center-deploy-extension.gif" alt-text="Deploy Defender extension for Azure Arc with Defender for Cloud's 'fix' option."::: - ### [**Azure CLI**](#tab/k8s-deploy-cli) ### Use Azure CLI to deploy the Defender extension @@ -119,12 +121,13 @@ You can use the **azure-defender-extension-arm-template.json** Resource Manager ### [**REST API**](#tab/k8s-deploy-api) -### Use REST API to deploy the Defender extension +### Use REST API to deploy the Defender extension To use the REST API to deploy the Defender extension, you'll need a Log Analytics workspace on your subscription. Learn more in [Log Analytics workspaces](../../azure-monitor/logs/log-analytics-workspace-overview.md). > [!TIP] > The simplest way to use the API to deploy the Defender extension is with the supplied **Postman Collection JSON** example from Defender for Cloud's [installation examples](https://aka.ms/kubernetes-extension-installation-examples). + - To modify the Postman Collection JSON, or to manually deploy the extension with the REST API, run the following PUT command: ```rest @@ -139,12 +142,11 @@ To use the REST API to deploy the Defender extension, you'll need a Log Analytic |Resource Group | Path | True | String | Name of the resource group containing your Azure Arc-enabled Kubernetes resource | | Cluster Name | Path | True | String | Name of your Azure Arc-enabled Kubernetes resource | - - For **Authentication**, your header must have a Bearer token (as with other Azure APIs). 
To get a bearer token, run the following command: `az account get-access-token --subscription ` Use the following structure for the body of your message: + ```json { "properties": { @@ -162,10 +164,10 @@ To use the REST API to deploy the Defender extension, you'll need a Log Analytic Description of the properties is given below: - | Property | Description | + | Property | Description | | -------- | ----------- | | logAnalytics.workspaceId | Workspace ID of the Log Analytics resource | - | logAnalytics.key | Key of the Log Analytics resource | + | logAnalytics.key | Key of the Log Analytics resource | | auditLogPath | **Optional**. The full path to the audit log files. The default value is ``/var/log/kube-apiserver/audit.log`` | --- @@ -186,7 +188,6 @@ To verify that your cluster has the Defender extension installed on it, follow t 1. Check that the cluster on which you deployed the extension is listed as **Healthy**. - ### [**Azure portal - Azure Arc**](#tab/k8s-verify-arc) ### Use the Azure Arc pages to verify the status of your extension @@ -201,7 +202,6 @@ To verify that your cluster has the Defender extension installed on it, follow t :::image type="content" source="../media/defender-for-kubernetes-azure-arc/extension-details-page.png" alt-text="Full details of an Azure Arc extension on a Kubernetes cluster."::: - ### [**Azure CLI**](#tab/k8s-verify-cli) ### Use Azure CLI to verify that the extension is deployed @@ -216,9 +216,9 @@ To verify that your cluster has the Defender extension installed on it, follow t > [!NOTE] > It might show "installState": "Pending" for the first few minutes. - + 1. If the state shows **Installed**, run the following command on your machine with the `kubeconfig` file pointed to your cluster to check that a pod called "azuredefender-XXXXX" is in 'Running' state: - + ```console kubectl get pods -n azuredefender ``` @@ -247,5 +247,3 @@ To confirm a successful deployment, or to validate the status of your extension ``` --- - - diff --git a/articles/defender-for-cloud/includes/defender-for-containers-override-faq.md b/articles/defender-for-cloud/includes/defender-for-containers-override-faq.md index 5eb26c8bd35b..d567d3dcc04b 100644 --- a/articles/defender-for-cloud/includes/defender-for-containers-override-faq.md +++ b/articles/defender-for-cloud/includes/defender-for-containers-override-faq.md @@ -15,7 +15,7 @@ ms.date: 05/12/2022 ### How can I use my existing Log Analytics workspace? -You can use your existing Log Analytics workspace by following the steps in the [Assign a custom workspace](https://docs.microsoft.com/azure/defender-for-cloud/defender-for-containers-enable?tabs=aks-deploy-portal%2Ck8s-deploy-asc%2Ck8s-verify-asc%2Ck8s-remove-arc%2Caks-removeprofile-api&pivots=defender-for-container-aks#assign-a-custom-workspace) workspace section of this article. +You can use your existing Log Analytics workspace by following the steps in the [Assign a custom workspace](../defender-for-containers-enable.md?pivots=defender-for-container-aks&tabs=aks-deploy-portal%2ck8s-deploy-asc%2ck8s-verify-asc%2ck8s-remove-arc%2caks-removeprofile-api#assign-a-custom-workspace) workspace section of this article. ### Can I delete the default workspaces created by Defender for Cloud? 
diff --git a/articles/defender-for-cloud/includes/defender-for-containers-remove-profile.md b/articles/defender-for-cloud/includes/defender-for-containers-remove-profile.md index 476a2b06f067..1d4e814de91b 100644 --- a/articles/defender-for-cloud/includes/defender-for-containers-remove-profile.md +++ b/articles/defender-for-cloud/includes/defender-for-containers-remove-profile.md @@ -9,7 +9,7 @@ ms.author: elkrieger To remove this - or any - Defender for Cloud extension, it's not enough to turn off auto provisioning: -- **Enabling** auto provisioning, potentially impacts *existing* and *future* machines. +- **Enabling** auto provisioning, potentially impacts *existing* and *future* machines. - **Disabling** auto provisioning for an extension, only affects the *future* machines - nothing is uninstalled by disabling auto provisioning. Nevertheless, to ensure the Defender for Containers components aren't automatically provisioned to your resources from now on, disable auto provisioning of the extensions as explained in [Configure auto provisioning for agents and extensions from Microsoft Defender for Cloud](../enable-data-collection.md). @@ -18,7 +18,7 @@ You can remove the profile using the REST API or a Resource Manager template as ### [**REST API**](#tab/aks-removeprofile-api) -### Use REST API to remove the Defender profile from AKS +### Use REST API to remove the Defender profile from AKS To remove the profile using the REST API, run the following PUT command: @@ -33,9 +33,8 @@ https://management.azure.com/subscriptions/{{SubscriptionId}}/resourcegroups/{{R | ClusterName | Cluster's name | Yes | | ApiVersion | API version, must be >= 2021-07-01 | Yes | - Request body: - + ```rest { "location": "{{Location}}", @@ -48,7 +47,7 @@ Request body: } } ``` - + Request body parameters: | Name | Description | Mandatory | @@ -56,7 +55,27 @@ Request body parameters: | location | Cluster's location | Yes | | properties.securityProfile.azureDefender.enabled | Determines whether to enable or disable Microsoft Defender for Containers on the cluster | Yes | +### [**Azure CLI**](#tab/k8s-remove-cli) + +### Use Azure CLI to remove the Defender profile + +1. Remove the Microsoft Defender for with the following commands: + ```azurecli + az login + az account set --subscription + az aks update --disable-defender --resource-group --name + ``` + + Removing the profile may take a few minutes. + +1. To verify that the profile was successfully removed, run the following command: + + ```console + kubectl get pods -n azuredefender + ``` + + When the profile is removed, you should see that no pods are returned in the `get pods` command. It might take a few minutes for the pods to be deleted. 
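
    If you run the removal from a script, you may want to wait until the pods are actually gone before continuing. The following is a minimal sketch, not part of the original steps, that assumes your `kubeconfig` already points to the cluster you updated and that the profile pods run in the `azuredefender` namespace shown above:

    ```console
    # Assumption: kubectl is configured for the cluster where the profile was just disabled.
    # Poll the azuredefender namespace until no pods remain.
    while [ -n "$(kubectl get pods -n azuredefender --no-headers 2>/dev/null)" ]; do
        echo "Waiting for Defender profile pods to terminate..."
        sleep 15
    done
    echo "No Defender profile pods remain."
    ```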
### [**Resource Manager**](#tab/aks-removeprofile-resource-manager) @@ -69,7 +88,7 @@ To use Azure Resource Manager to remove the Defender profile, you'll need a Log The relevant template and parameters to remove the Defender profile from AKS are: -``` +```json { "type": "Microsoft.ContainerService/managedClusters", "apiVersion": "2021-07-01", diff --git a/articles/defender-for-cloud/information-protection.md b/articles/defender-for-cloud/information-protection.md index a6b1e7568162..c8169783c4ca 100644 --- a/articles/defender-for-cloud/information-protection.md +++ b/articles/defender-for-cloud/information-protection.md @@ -2,7 +2,7 @@ title: Prioritize security actions by data sensitivity - Microsoft Defender for Cloud description: Use Microsoft Purview's data sensitivity classifications in Microsoft Defender for Cloud ms.topic: overview -ms.date: 11/09/2021 +ms.date: 04/27/2022 --- # Prioritize security actions by data sensitivity @@ -70,6 +70,13 @@ A graph shows the number of recommendations and alerts by classified resource ty :::image type="content" source="./media/information-protection/overview-dashboard-information-protection.png" alt-text="Screenshot of the information protection tile in Microsoft Defender for Cloud's overview dashboard." lightbox="./media/information-protection/overview-dashboard-information-protection.png"::: +## Learn more + +If you would like to learn more from the product manager about Microsoft Defender for Cloud's [integration with Azure Purview](episode-two.md). + +You can also check out the following blog: + +- [Secure sensitive data in your cloud resources](https://techcommunity.microsoft.com/t5/microsoft-defender-for-cloud/secure-sensitive-data-in-your-cloud-resources/ba-p/2918646). ## Next steps diff --git a/articles/defender-for-cloud/media/defender-for-containers/Edit auto provisioning configuration.png b/articles/defender-for-cloud/media/defender-for-containers/Edit auto provisioning configuration.png new file mode 100644 index 000000000000..140a9c31bdbf Binary files /dev/null and b/articles/defender-for-cloud/media/defender-for-containers/Edit auto provisioning configuration.png differ diff --git a/articles/defender-for-cloud/quickstart-onboard-aws.md b/articles/defender-for-cloud/quickstart-onboard-aws.md index 566f635ffb6f..0d8f38a5bbbc 100644 --- a/articles/defender-for-cloud/quickstart-onboard-aws.md +++ b/articles/defender-for-cloud/quickstart-onboard-aws.md @@ -133,7 +133,7 @@ If you have any existing connectors created with the classic cloud connectors ex - (Optional) Select **Configure**, to edit the configuration as required. -1. By default the **Containers** plan is set to **On**. This is necessary to have Defender for Containers protect your AWS EKS clusters. Ensure you have fulfilled the [network requirements](https://docs.microsoft.com/azure/defender-for-cloud/defender-for-containers-enable?tabs=aks-deploy-portal%2Ck8s-deploy-asc%2Ck8s-verify-asc%2Ck8s-remove-arc%2Caks-removeprofile-api&pivots=defender-for-container-eks&source=docs#network-requirements) for the Defender for Containers plan. +1. By default the **Containers** plan is set to **On**. This is necessary to have Defender for Containers protect your AWS EKS clusters. Ensure you have fulfilled the [network requirements](./defender-for-containers-enable.md?pivots=defender-for-container-eks&source=docs&tabs=aks-deploy-portal%2ck8s-deploy-asc%2ck8s-verify-asc%2ck8s-remove-arc%2caks-removeprofile-api#network-requirements) for the Defender for Containers plan. 
> [!Note] > Azure Arc-enabled Kubernetes, the Defender Arc extension, and the Azure Policy Arc extension should be installed. Use the dedicated Defender for Cloud recommendations to deploy the extensions (and Arc, if necessary) as explained in [Protect Amazon Elastic Kubernetes Service clusters](defender-for-containers-enable.md?tabs=defender-for-container-eks). @@ -321,10 +321,19 @@ For other operating systems, the SSM Agent should be installed manually using th - [Install SSM Agent for a hybrid environment (Windows)](https://docs.aws.amazon.com/systems-manager/latest/userguide/sysman-install-managed-win.html) - [Install SSM Agent for a hybrid environment (Linux)](https://docs.aws.amazon.com/systems-manager/latest/userguide/sysman-install-managed-linux.html) +## Learn more + +If you would like to learn more from the product manager about Microsoft Defender for Cloud's new AWS connector check out [Microsoft Defender for Cloud in the Field](episode-one.md). + +You can also check out the following blogs: + +- [Ignite 2021: Microsoft Defender for Cloud news](https://techcommunity.microsoft.com/t5/microsoft-defender-for-cloud/ignite-2021-microsoft-defender-for-cloud-news/ba-p/2882807). +- [Custom assessments and standards in Microsoft Defender for Cloud for AWS workloads (Preview)](https://techcommunity.microsoft.com/t5/microsoft-defender-for-cloud/custom-assessments-and-standards-in-microsoft-defender-for-cloud/ba-p/3066575). +- [Security posture management and server protection for AWS and GCP](https://techcommunity.microsoft.com/t5/microsoft-defender-for-cloud/security-posture-management-and-server-protection-for-aws-and/ba-p/3271388) ## Next steps Connecting your AWS account is part of the multicloud experience available in Microsoft Defender for Cloud. For related information, see the following page: - [Security recommendations for AWS resources - a reference guide](recommendations-reference-aws.md). -- [Connect your GCP projects to Microsoft Defender for Cloud](quickstart-onboard-gcp.md) +- [Connect your GCP projects to Microsoft Defender for Cloud](quickstart-onboard-gcp.md) \ No newline at end of file diff --git a/articles/defender-for-cloud/recommendations-reference-aws.md b/articles/defender-for-cloud/recommendations-reference-aws.md index c7c8ace8afa9..6bffc8a7a7db 100644 --- a/articles/defender-for-cloud/recommendations-reference-aws.md +++ b/articles/defender-for-cloud/recommendations-reference-aws.md @@ -2,7 +2,7 @@ title: Reference table for all Microsoft Defender for Cloud recommendations for AWS resources description: This article lists Microsoft Defender for Cloud's security recommendations that help you harden and protect your AWS resources. 
ms.topic: reference -ms.date: 03/13/2022 +ms.date: 05/25/2022 ms.custom: generated --- # Security recommendations for AWS resources - a reference guide diff --git a/articles/defender-for-cloud/release-notes.md b/articles/defender-for-cloud/release-notes.md index f9c070361355..72d77e98c403 100644 --- a/articles/defender-for-cloud/release-notes.md +++ b/articles/defender-for-cloud/release-notes.md @@ -22,6 +22,7 @@ Updates in May include: - [Multicloud settings of Servers plan are now available in connector level](#multicloud-settings-of-servers-plan-are-now-available-in-connector-level) - [JIT (Just-in-time) access for VMs is now available for AWS EC2 instances (Preview)](#jit-just-in-time-access-for-vms-is-now-available-for-aws-ec2-instances-preview) +- [Add and remove the Defender profile for AKS clusters from the CLI](#add-and-remove-the-defender-profile-for-aks-clusters-from-the-cli) ### Multicloud settings of Servers plan are now available in connector level @@ -30,7 +31,7 @@ There are now connector-level settings for Defender for Servers in multicloud. The new connector-level settings provide granularity for pricing and auto-provisioning configuration per connector, independently of the subscription. All auto-provisioning components available in the connector level (Azure Arc, MDE, and vulnerability assessments) are enabled by default, and the new configuration supports both [Plan 1 and Plan 2 pricing tiers](defender-for-servers-introduction.md#what-are-the-microsoft-defender-for-server-plans). - + Updates in the UI include a reflection of the selected pricing tier and the required components configured. :::image type="content" source="media/release-notes/main-page.png" alt-text="Screenshot of the main plan page with the Server plan multicloud settings." lightbox="media/release-notes/main-page.png"::: @@ -53,6 +54,13 @@ When you [connect AWS accounts](quickstart-onboard-aws.md), JIT will automatical Learn how [JIT protects your AWS EC2 instances](just-in-time-access-overview.md#how-jit-operates-with-network-resources-in-azure-and-aws) +### Add and remove the Defender profile for AKS clusters from the CLI + +The Defender profile (preview) is required for Defender for Containers to provide the runtime protections and collects signals from nodes. You can now use the Azure CLI to [add and remove the Defender profile](defender-for-containers-enable.md?tabs=k8s-deploy-cli%2Ck8s-deploy-asc%2Ck8s-verify-asc%2Ck8s-remove-arc%2Ck8s-remove-cli&pivots=defender-for-container-aks#use-azure-cli-to-deploy-the-defender-extension) for an AKS cluster. + +> [!NOTE] +> This option is included in [Azure CLI 3.7 and above](/cli/azure/update-azure-cli.md). + ## April 2022 Updates in April include: diff --git a/articles/defender-for-cloud/review-security-recommendations.md b/articles/defender-for-cloud/review-security-recommendations.md index e764958bf666..99acbe8208b0 100644 --- a/articles/defender-for-cloud/review-security-recommendations.md +++ b/articles/defender-for-cloud/review-security-recommendations.md @@ -2,7 +2,7 @@ title: Security recommendations in Microsoft Defender for Cloud description: This document walks you through how recommendations in Microsoft Defender for Cloud help you protect your Azure resources and stay in compliance with security policies. ms.topic: conceptual -ms.date: 04/03/2022 +ms.date: 05/11/2022 --- # Review your security recommendations @@ -159,6 +159,16 @@ When the report is ready, you'll be notified by a second pop-up. 
:::image type="content" source="media/review-security-recommendations/downloaded-csv.png" alt-text="Screenshot letting you know your downloaded completed."::: +## Learn more + +If you would like to learn more from the product manager about security posture, check out [Security posture management improvements](episode-four.md). + +You can also check out the following blogs: + +- [Security posture management and server protection for AWS and GCP are now generally available](https://techcommunity.microsoft.com/t5/microsoft-defender-for-cloud/security-posture-management-and-server-protection-for-aws-and/ba-p/3271388) +- [Custom assessments and standards in Microsoft Defender for Cloud for AWS workloads (Preview)](https://techcommunity.microsoft.com/t5/microsoft-defender-for-cloud/custom-assessments-and-standards-in-microsoft-defender-for-cloud/ba-p/3066575) +- [New enhancements added to network security dashboard](https://techcommunity.microsoft.com/t5/microsoft-defender-for-cloud/new-enhancements-added-to-network-security-dashboard/ba-p/2896021) + ## Next steps In this document, you were introduced to security recommendations in Defender for Cloud. For related information: diff --git a/articles/defender-for-iot/organizations/integrate-with-active-directory.md b/articles/defender-for-iot/organizations/integrate-with-active-directory.md index 19e25350ad27..20686cb55b14 100644 --- a/articles/defender-for-iot/organizations/integrate-with-active-directory.md +++ b/articles/defender-for-iot/organizations/integrate-with-active-directory.md @@ -59,4 +59,4 @@ You can associate Active Directory groups defined here with specific permission ## Next steps -For more information, see [how to create and manage users](/azure/defender-for-iot/organizations/how-to-create-and-manage-users). \ No newline at end of file +For more information, see [how to create and manage users](./how-to-create-and-manage-users.md). \ No newline at end of file diff --git a/articles/defender-for-iot/organizations/media/release-notes/appliance-catalog.png b/articles/defender-for-iot/organizations/media/release-notes/appliance-catalog.png new file mode 100644 index 000000000000..cc1ddbe9d2e7 Binary files /dev/null and b/articles/defender-for-iot/organizations/media/release-notes/appliance-catalog.png differ diff --git a/articles/defender-for-iot/organizations/release-notes.md b/articles/defender-for-iot/organizations/release-notes.md index 5f3383d425b1..7995dc43bb4b 100644 --- a/articles/defender-for-iot/organizations/release-notes.md +++ b/articles/defender-for-iot/organizations/release-notes.md @@ -2,7 +2,7 @@ title: What's new in Microsoft Defender for IoT description: This article lets you know what's new in the latest release of Defender for IoT. ms.topic: overview -ms.date: 03/22/2022 +ms.date: 05/25/2022 --- # What's new in Microsoft Defender for IoT? @@ -19,7 +19,7 @@ Noted features listed below are in PREVIEW. The [Azure Preview Supplemental Term The Defender for IoT architecture uses on-premises sensors and management servers. This section describes the servicing information and timelines for the available on-premises software versions. -- Each General Availability (GA) version of the Defender for IoT sensor and on-premises management console software is supported for nine months after release. Fixes and new functionality are applied to each new version and are not applied to older versions. 
+- Each General Availability (GA) version of the Defender for IoT sensor and on-premises management console software is supported for nine months after release. Fixes and new functionality are applied to each new version and aren't applied to older versions. - Software update packages include new functionality and security patches. Urgent, high-risk security updates are applied in minor versions that may be released throughout the quarter. @@ -41,6 +41,49 @@ For more information, see the [Microsoft Security Development Lifecycle practice | 10.5.3 | 10/2021 | 07/2022 | | 10.5.2 | 10/2021 | 07/2022 | +## May 2022 + +We've recently optimized and enhanced our documentation as follows: + +- [Updated appliance catalog for OT environments](#updated-appliance-catalog-for-ot-environments) +- [Documentation reorganization for end-user organizations](#documentation-reorganization-for-end-user-organizations) + +### Updated appliance catalog for OT environments + +We've refreshed and revamped the catalog of supported appliances for monitoring OT environments. These appliances support flexible deployment options for environments of all sizes and can be used to host both the OT monitoring sensor and on-premises management consoles. + +Use the new pages as follows: + +1. **Understand which hardware model best fits your organization's needs.** For more information, see [Which appliances do I need?](ot-appliance-sizing.md) + +1. **Learn about the preconfigured hardware appliances that are available to purchase, or system requirements for virtual machines.** For more information, see [Pre-configured physical appliances for OT monitoring](ot-pre-configured-appliances.md) and [OT monitoring with virtual appliances](ot-virtual-appliances.md). + + For more information about each appliance type, use the linked reference page, or browse through our new **Reference > OT monitoring appliances** section. + + :::image type="content" source="media/release-notes/appliance-catalog.png" alt-text="Screenshot of the new appliance catalog reference section." lightbox="media/release-notes/appliance-catalog.png"::: + + Reference articles for each appliance type, including virtual appliances, include specific steps to configure the appliance for OT monitoring with Defender for IoT. Generic software installation and troubleshooting procedures are still documented in [Defender for IoT software installation](how-to-install-software.md). + +### Documentation reorganization for end-user organizations + +We recently reorganized our Defender for IoT documentation for end-user organizations, highlighting a clearer path for onboarding and getting started. + +Check out our new structure to follow through viewing devices and assets, managing alerts, vulnerabilities and threats, integrating with other services, and deploying and maintaining your Defender for IoT system. 
+ +**New and updated articles include**: + +- [Welcome to Microsoft Defender for IoT for organizations](overview.md) +- [Microsoft Defender for IoT architecture](architecture.md) +- [Quickstart: Get started with Defender for IoT](getting-started.md) +- [Tutorial: Microsoft Defender for IoT trial setup](tutorial-onboarding.md) +- [Tutorial: Get started with Enterprise IoT](tutorial-getting-started-eiot-sensor.md) +- [Plan your sensor connections for OT monitoring](plan-network-monitoring.md) +- [About Microsoft Defender for IoT network setup](how-to-set-up-your-network.md) + +> [!NOTE] +> To send feedback on docs via GitHub, scroll to the bottom of the page and select the **Feedback** option for **This page**. We'd be glad to hear from you! +> + ## April 2022 **Sensor software version**: 22.1.4 @@ -219,7 +262,7 @@ Other alert updates include: - **Access contextual data** for each alert, such as events that occurred around the same time, or a map of connected devices. Maps of connected devices are available for sensor console alerts only. -- **Alert statuses** are updated, and for example now include a *Closed* status instead of *Acknowledged*. +- **Alert statuses** are updated, and, for example, now include a *Closed* status instead of *Acknowledged*. - **Alert storage** for 90 days from the time that they're first detected. @@ -462,4 +505,4 @@ Unicode characters are now supported when working with sensor certificate passph ## Next steps -[Getting started with Defender for IoT](getting-started.md) \ No newline at end of file +[Getting started with Defender for IoT](getting-started.md) diff --git a/articles/digital-twins/.openpublishing.redirection.digital-twins.json b/articles/digital-twins/.openpublishing.redirection.digital-twins.json index 7bdf54312c7f..5ae59a46ca6a 100644 --- a/articles/digital-twins/.openpublishing.redirection.digital-twins.json +++ b/articles/digital-twins/.openpublishing.redirection.digital-twins.json @@ -1,5 +1,15 @@ { "redirections": [ + { + "source_path": "how-to-create-app-registration-portal.md", + "redirect_url": "/azure/digital-twins/how-to-create-app-registration", + "redirect_document_id": true + }, + { + "source_path": "how-to-create-app-registration-cli.md", + "redirect_url": "/azure/digital-twins/how-to-create-app-registration", + "redirect_document_id": false + }, { "source_path": "how-to-set-up-instance-powershell.md", "redirect_url": "/azure/digital-twins/how-to-set-up-instance-cli", @@ -25,11 +35,6 @@ "redirect_url": "/azure/digital-twins/how-to-monitor-resource-health", "redirect_document_id": true }, - { - "source_path": "how-to-create-app-registration.md", - "redirect_url": "/azure/digital-twins/how-to-create-app-registration-portal", - "redirect_document_id": true - }, { "source_path": "concepts-integration.md", "redirect_url": "/azure/digital-twins/concepts-data-ingress-egress", diff --git a/articles/digital-twins/TOC.yml b/articles/digital-twins/TOC.yml index d9f08c89ffd0..ec799e3c7a9e 100644 --- a/articles/digital-twins/TOC.yml +++ b/articles/digital-twins/TOC.yml @@ -110,11 +110,7 @@ - name: Write app authentication code href: how-to-authenticate-client.md - name: Create an app registration with Azure Digital Twins access - items: - - name: Portal - href: how-to-create-app-registration-portal.md - - name: CLI - href: how-to-create-app-registration-cli.md + href: how-to-create-app-registration.md - name: Integrate with Azure SignalR Service href: how-to-integrate-azure-signalr.md - name: Connect input diff --git 
a/articles/digital-twins/concepts-3d-scenes-studio.md b/articles/digital-twins/concepts-3d-scenes-studio.md index 55957ea7684b..cb3080720e2e 100644 --- a/articles/digital-twins/concepts-3d-scenes-studio.md +++ b/articles/digital-twins/concepts-3d-scenes-studio.md @@ -54,10 +54,10 @@ To work with 3D Scenes Studio, you'll need the following required resources: * You'll need *Azure Digital Twins Data Owner* or *Azure Digital Twins Data Reader* access to the instance * The instance should be populated with [models](concepts-models.md) and [twins](concepts-twins-graph.md) -* An [Azure storage account](/azure/storage/common/storage-account-create?tabs=azure-portal), and a [private container](/azure/storage/blobs/storage-quickstart-blobs-portal#create-a-container) in the storage account +* An [Azure storage account](../storage/common/storage-account-create.md?tabs=azure-portal), and a [private container](../storage/blobs/storage-quickstart-blobs-portal.md#create-a-container) in the storage account * To **view** 3D scenes, you'll need at least *Storage Blob Data Reader* access to these storage resources. To **build** 3D scenes, you'll need *Storage Blob Data Contributor* or *Storage Blob Data Owner* access. - You can grant required roles at either the storage account level or the container level. For more information about Azure storage permissions, see [Assign an Azure role](/azure/storage/blobs/assign-azure-role-data-access?tabs=portal#assign-an-azure-role). + You can grant required roles at either the storage account level or the container level. For more information about Azure storage permissions, see [Assign an Azure role](../storage/blobs/assign-azure-role-data-access.md?tabs=portal#assign-an-azure-role). * You should also configure [CORS](/rest/api/storageservices/cross-origin-resource-sharing--cors--support-for-the-azure-storage-services) for your storage account, so that 3D Scenes Studio will be able to access your storage container. For complete CORS setting information, see [Use 3D Scenes Studio (preview)](how-to-use-3d-scenes-studio.md#prerequisites). Then, you can access 3D Scenes Studio at this link: [3D Scenes Studio](https://dev.explorer.azuredigitaltwins-test.net/3dscenes). @@ -137,4 +137,4 @@ These limits are recommended because 3D Scenes Studio leverages the standard [Az Try out 3D Scenes Studio with a sample scenario in [Get started with 3D Scenes Studio](quickstart-3d-scenes-studio.md). -Or, learn how to use the studio's full feature set in [Use 3D Scenes Studio](how-to-use-3d-scenes-studio.md). +Or, learn how to use the studio's full feature set in [Use 3D Scenes Studio](how-to-use-3d-scenes-studio.md). \ No newline at end of file diff --git a/articles/digital-twins/how-to-authenticate-client.md b/articles/digital-twins/how-to-authenticate-client.md index fed4e7acb316..3af3e333fb61 100644 --- a/articles/digital-twins/how-to-authenticate-client.md +++ b/articles/digital-twins/how-to-authenticate-client.md @@ -80,7 +80,7 @@ The [ManagedIdentityCredential](/dotnet/api/azure.identity.managedidentitycreden This means that you may use `ManagedIdentityCredential` in the same project as `DefaultAzureCredential` or `InteractiveBrowserCredential`, to authenticate a different part of the project. -To use the default Azure credentials, you'll need the Azure Digital Twins instance's URL ([instructions to find](how-to-set-up-instance-portal.md#verify-success-and-collect-important-values)). 
You may also need an [app registration](./how-to-create-app-registration-portal.md) and the registration's [Application (client) ID](./how-to-create-app-registration-portal.md#collect-client-id-and-tenant-id). +To use the default Azure credentials, you'll need the Azure Digital Twins instance's URL ([instructions to find](how-to-set-up-instance-portal.md#verify-success-and-collect-important-values)). You may also need an [app registration](./how-to-create-app-registration.md) and the registration's [Application (client) ID](./how-to-create-app-registration.md#collect-client-id-and-tenant-id). In an Azure function, you can use the managed identity credentials like this: @@ -90,9 +90,9 @@ In an Azure function, you can use the managed identity credentials like this: The [InteractiveBrowserCredential](/dotnet/api/azure.identity.interactivebrowsercredential?view=azure-dotnet&preserve-view=true) method is intended for interactive applications and will bring up a web browser for authentication. You can use this method instead of `DefaultAzureCredential` in cases where you require interactive authentication. -To use the interactive browser credentials, you'll need an **app registration** that has permissions to the Azure Digital Twins APIs. For steps on how to set up this app registration, see [Create an app registration with Azure Digital Twins access](./how-to-create-app-registration-portal.md). Once the app registration is set up, you'll need... -* [the app registration's Application (client) ID](./how-to-create-app-registration-portal.md#collect-client-id-and-tenant-id) -* [the app registration's Directory (tenant) ID](./how-to-create-app-registration-portal.md#collect-client-id-and-tenant-id) +To use the interactive browser credentials, you'll need an **app registration** that has permissions to the Azure Digital Twins APIs. For steps on how to set up this app registration, see [Create an app registration with Azure Digital Twins access](./how-to-create-app-registration.md). Once the app registration is set up, you'll need... +* [the app registration's Application (client) ID](./how-to-create-app-registration.md#collect-client-id-and-tenant-id) +* [the app registration's Directory (tenant) ID](./how-to-create-app-registration.md#collect-client-id-and-tenant-id) * [the Azure Digital Twins instance's URL](how-to-set-up-instance-portal.md#verify-success-and-collect-important-values) Here's an example of the code to create an authenticated SDK client using `InteractiveBrowserCredential`. diff --git a/articles/digital-twins/how-to-create-app-registration-cli.md b/articles/digital-twins/how-to-create-app-registration-cli.md deleted file mode 100644 index a5b7f167d4fa..000000000000 --- a/articles/digital-twins/how-to-create-app-registration-cli.md +++ /dev/null @@ -1,168 +0,0 @@ ---- -# Mandatory fields. -title: Create an app registration with Azure Digital Twins access (CLI) -titleSuffix: Azure Digital Twins -description: Use the CLI to create an Azure AD app registration that can access Azure Digital Twins resources. -author: baanders -ms.author: baanders # Microsoft employees only -ms.date: 2/24/2022 -ms.topic: how-to -ms.service: digital-twins -ms.custom: contperf-fy22q3 - -# Optional fields. Don't forget to remove # if you need a field. 
-# ms.custom: can-be-multiple-comma-separated -# ms.reviewer: MSFT-alias-of-reviewer -# manager: MSFT-alias-of-manager-or-PM-counterpart ---- - -# Create an app registration to use with Azure Digital Twins (CLI) - -[!INCLUDE [digital-twins-create-app-registration-selector.md](../../includes/digital-twins-create-app-registration-selector.md)] - -This article describes how to use the Azure CLI to create an [Azure Active Directory (Azure AD)](../active-directory/fundamentals/active-directory-whatis.md) *app registration* that can access Azure Digital Twins. - -When working with Azure Digital Twins, it's common to interact with your instance through client applications. Those applications need to authenticate with Azure Digital Twins, and some of the [authentication mechanisms](how-to-authenticate-client.md) that apps can use involve an app registration. - -The app registration isn't required for all authentication scenarios. However, if you're using an authentication strategy or code sample that does require an app registration, this article shows you how to set one up and grant it permissions to the Azure Digital Twins APIs. It also covers how to collect important values that you'll need to use the app registration when authenticating. - ->[!TIP] -> You may prefer to set up a new app registration every time you need one, or to do this only once, establishing a single app registration that will be shared among all scenarios that require it. - -[!INCLUDE [azure-cli-prepare-your-environment.md](../../includes/azure-cli-prepare-your-environment.md)] - -## Create manifest - -First, create a file containing certain service information that your app registration will need to access the Azure Digital Twins APIs. Later, you'll pass in this file when creating the app registration, to set up the Azure Digital Twins permissions. - -Create a new .json file on your computer called *manifest.json*. Copy this text into the file: - -```json -[ - { - "resourceAppId": "0b07f429-9f4b-4714-9392-cc5e8e80c8b0", - "resourceAccess": [ - { - "id": "4589bd03-58cb-4e6c-b17f-b580e39652f8", - "type": "Scope" - } - ] - } -] -``` - -The static value `0b07f429-9f4b-4714-9392-cc5e8e80c8b0` is the resource ID for the Azure Digital Twins service endpoint, which your app registration will need to access the Azure Digital Twins APIs. - -Save the finished file. - -### Cloud Shell users: Upload manifest - -If you're using Cloud Shell for this tutorial, you'll need to upload the manifest file you created to the Cloud Shell, so that you can access it in Cloud Shell commands when configuring the app registration. If you're using a local installation of the Azure CLI, you can skip this step. - -To upload the file, go to the Cloud Shell window in your browser. Select the "Upload/Download files" icon and choose "Upload". - -:::image type="content" source="media/how-to-set-up-instance/cloud-shell/cloud-shell-upload.png" alt-text="Screenshot of Azure Cloud Shell. The Upload icon is highlighted."::: - -Navigate to the *manifest.json* file on your machine and select **Open**. Doing so will upload the file to the root of your Cloud Shell storage. - -## Create the registration - -In this section, you'll run a CLI command to create an app registration with the following settings: -* Name of your choice -* Available only to accounts in the default directory (single tenant) -* A web reply URL of `http://localhost` -* Read/write permissions to the Azure Digital Twins APIs - -Run the following command to create the registration. 
If you're using Cloud Shell, the path to the manifest.json file is `@manifest.json`. - -```azurecli-interactive -az ad app create --display-name --available-to-other-tenants false --reply-urls http://localhost --native-app --required-resource-accesses "" -``` - -The output of the command is information about the app registration you've created. - -## Verify success - -You can confirm that the Azure Digital Twins permissions were granted by looking for the following fields in the output of the `az ad app create` command, and confirming their values match what's shown in the screenshot below: - -:::image type="content" source="media/how-to-create-app-registration/cli-required-resource-access.png" alt-text="Screenshot of Cloud Shell output of the app registration creation command. The items under 'requiredResourceAccess' are highlighted: there's a 'resourceAppId' value of 0b07f429-9f4b-4714-9392-cc5e8e80c8b0, and a 'resourceAccess > id' value of 4589bd03-58cb-4e6c-b17f-b580e39652f8."::: - -You can also verify the app registration was successfully created with the necessary API permissions by using the Azure portal. For portal instructions, see [Verify API permissions (portal)](how-to-create-app-registration-portal.md#verify-api-permissions). - -## Collect important values - -Next, collect some important values about the app registration that you'll need to use the app registration to authenticate a client application. These values include: -* resource name -* client ID -* tenant ID -* client secret - -To work with Azure Digital Twins, the resource name is `http://digitaltwins.azure.net`. - -The following sections describe how to find the other values. - -### Collect client ID and tenant ID - -To use the app registration for authentication, you may need to provide its **Application (client) ID** and **Directory (tenant) ID**. In this section, you'll collect these values so you can save them and use them whenever they're needed. - -You can find both of these values in the output from the `az ad app create` command. - -Application (client) ID: - -:::image type="content" source="media/how-to-create-app-registration/cli-app-id.png" alt-text="Screenshot of Cloud Shell output of the app registration creation command. The appId value is highlighted."::: - -Directory (tenant) ID: - -:::image type="content" source="media/how-to-create-app-registration/cli-tenant-id.png" alt-text="Screenshot of Cloud Shell output of the app registration creation command. The GUID value in the odata.metadata is highlighted."::: - -### Collect client secret - -To create a client secret for your app registration, you'll need your app registration's client ID value from the previous section. Use the value in the following CLI command to create a new secret: - -```azurecli-interactive -az ad app credential reset --id --append -``` - -You can also add optional parameters to this command to specify a credential description, end date, and other details. For more information about the command and its parameters, see [az ad app credential reset documentation](/cli/azure/ad/app/credential#az-ad-app-credential-reset). - -The output of this command is information about the client secret that you've created. Copy the value for `password` to use when you need the client secret for authentication. - -:::image type="content" source="media/how-to-create-app-registration/cli-client-secret.png" alt-text="Screenshot of Cloud Shell output of the app registration creation command. 
The password value is highlighted."::: - ->[!IMPORTANT] ->Make sure to copy the value now and store it in a safe place, as it cannot be retrieved again. If you can't find the value later, you'll have to create a new secret. - -## Create Azure Digital Twins role assignment - -In this section, you'll create a role assignment for the app registration to set its permissions on the Azure Digital Twins instance. This role will determine what permissions the app registration holds on the instance, so you should select the role that matches the appropriate level of permission for your situation. One possible role is [Azure Digital Twins Data Owner](../role-based-access-control/built-in-roles.md#azure-digital-twins-data-owner). For a full list of roles and their descriptions, see [Azure built-in roles](../role-based-access-control/built-in-roles.md). - -Use the following command to assign the role (must be run by a user with [sufficient permissions](how-to-set-up-instance-cli.md#prerequisites-permission-requirements) in the Azure subscription). The command requires you to pass in the name of the app registration. - -```azurecli-interactive -az dt role-assignment create --dt-name --assignee "" --role "" -``` - -The result of this command is outputted information about the role assignment that's been created for the app registration. - -### Verify role assignment - -To further verify the role assignment, you can look for it in the Azure portal. Follow the instructions in [Verify role assignment (portal)](how-to-create-app-registration-portal.md#verify-role-assignment). - -## Other possible steps for your organization - -It's possible that your organization requires more actions from subscription Owners/administrators to successfully set up an app registration. The steps required may vary depending on your organization's specific settings. - -Here are some common potential activities that an Owner or administrator on the subscription may need to do. -* Grant admin consent for the app registration. Your organization may have **Admin Consent Required** globally turned on in Azure AD for all app registrations within your subscription. If so, the Owner/administrator may need to grant additional delegated or application permissions. -* Activate public client access by appending `--set publicClient=true` to a create or update command for the registration. -* Set specific reply URLs for web and desktop access using the `--reply-urls` parameter. For more information on using this parameter with `az ad` commands, see the [az ad app documentation](/cli/azure/ad/app). -* Allow for implicit OAuth2 authentication flows using the `--oauth2-allow-implicit-flow` parameter. For more information on using this parameter with `az ad` commands, see the [az ad app documentation](/cli/azure/ad/app). - -For more information about app registration and its different setup options, see [Register an application with the Microsoft identity platform](/graph/auth-register-app-v2). - -## Next steps - -In this article, you set up an Azure AD app registration that can be used to authenticate client applications with the Azure Digital Twins APIs. 
- -Next, read about authentication mechanisms, including one that uses app registrations and others that don't: -* [Write app authentication code](how-to-authenticate-client.md) \ No newline at end of file diff --git a/articles/digital-twins/how-to-create-app-registration-portal.md b/articles/digital-twins/how-to-create-app-registration.md similarity index 52% rename from articles/digital-twins/how-to-create-app-registration-portal.md rename to articles/digital-twins/how-to-create-app-registration.md index e5c874a53ebf..7c720d44b559 100644 --- a/articles/digital-twins/how-to-create-app-registration-portal.md +++ b/articles/digital-twins/how-to-create-app-registration.md @@ -1,13 +1,14 @@ --- # Mandatory fields. -title: Create an app registration with Azure Digital Twins access (portal) +title: Create an app registration with Azure Digital Twins access titleSuffix: Azure Digital Twins -description: Use the Azure portal to create an Azure AD app registration that can access Azure Digital Twins resources. +description: Create an Azure Active Directory app registration that can access Azure Digital Twins resources. author: baanders ms.author: baanders # Microsoft employees only -ms.date: 2/24/2022 +ms.date: 5/25/2022 ms.topic: how-to ms.service: digital-twins +ms.custom: contperf-fy22q4 # Optional fields. Don't forget to remove # if you need a field. # ms.custom: can-be-multiple-comma-separated @@ -15,11 +16,9 @@ ms.service: digital-twins # manager: MSFT-alias-of-manager-or-PM-counterpart --- -# Create an app registration to use with Azure Digital Twins (portal) +# Create an app registration to use with Azure Digital Twins -[!INCLUDE [digital-twins-create-app-registration-selector.md](../../includes/digital-twins-create-app-registration-selector.md)] - -This article describes how to use the Azure portal to create an [Azure Active Directory (Azure AD)](../active-directory/fundamentals/active-directory-whatis.md) *app registration* that can access Azure Digital Twins. +This article describes how to create an [Azure Active Directory (Azure AD)](../active-directory/fundamentals/active-directory-whatis.md) *app registration* that can access Azure Digital Twins. This article includes steps for the [Azure portal](https://portal.azure.com) and the [Azure CLI](/cli/azure/what-is-azure-cli). When working with Azure Digital Twins, it's common to interact with your instance through client applications. Those applications need to authenticate with Azure Digital Twins, and some of the [authentication mechanisms](how-to-authenticate-client.md) that apps can use involve an app registration. @@ -30,7 +29,11 @@ The app registration isn't required for all authentication scenarios. However, i ## Create the registration -Start by navigating to [Azure Active Directory](https://portal.azure.com/#blade/Microsoft_AAD_IAM/ActiveDirectoryMenuBlade/Overview) in the Azure portal (you can use this link or find it with the portal search bar). Select **App registrations** from the service menu, and then **+ New registration**. +Start by selecting the tab below for your preferred interface. + +# [Portal](#tab/portal) + +Navigate to [Azure Active Directory](https://portal.azure.com/#blade/Microsoft_AAD_IAM/ActiveDirectoryMenuBlade/Overview) in the Azure portal (you can use this link or find it with the portal search bar). Select **App registrations** from the service menu, and then **+ New registration**. 
:::image type="content" source="media/how-to-create-app-registration/new-registration.png" alt-text="Screenshot of the Azure AD service page in the Azure portal, showing the steps to create a new registration in the 'App registrations' page."::: @@ -45,29 +48,113 @@ When you're finished, select the **Register** button. When the registration is finished setting up, the portal will redirect you to its details page. -## Collect important values +# [CLI](#tab/cli) + +Start by creating a manifest file, which contains service information that your app registration will need to access the Azure Digital Twins APIs. Afterwards, you'll pass this file into a CLI command to create the registration. + +### Create manifest + +Create a new .json file on your computer called *manifest.json*. Copy this text into the file: + +```json +[ + { + "resourceAppId": "0b07f429-9f4b-4714-9392-cc5e8e80c8b0", + "resourceAccess": [ + { + "id": "4589bd03-58cb-4e6c-b17f-b580e39652f8", + "type": "Scope" + } + ] + } +] +``` + +The static value `0b07f429-9f4b-4714-9392-cc5e8e80c8b0` is the resource ID for the Azure Digital Twins service endpoint, which your app registration will need to access the Azure Digital Twins APIs. + +Save the finished file. + +### Cloud Shell users: Upload manifest + +If you're using Azure Cloud Shell for this tutorial, you'll need to upload the manifest file you created to the Cloud Shell, so that you can access it in Cloud Shell commands when configuring the app registration. If you're using a local installation of the Azure CLI, you can skip ahead to the next step, [Run the creation command](#run-the-creation-command). + +To upload the file, go to the Cloud Shell window in your browser. Select the "Upload/Download files" icon and choose "Upload". + +:::image type="content" source="media/how-to-set-up-instance/cloud-shell/cloud-shell-upload.png" alt-text="Screenshot of Azure Cloud Shell. The Upload icon is highlighted."::: + +Navigate to the *manifest.json* file on your machine and select **Open**. Doing so will upload the file to the root of your Cloud Shell storage. + +### Run the creation command + +In this section, you'll run a CLI command to create an app registration with the following settings: +* Name of your choice +* Available only to accounts in the default directory (single tenant) +* A web reply URL of `http://localhost` +* Read/write permissions to the Azure Digital Twins APIs + +Run the following command to create the registration. If you're using Cloud Shell, the path to the manifest.json file is `@manifest.json`. + +```azurecli-interactive +az ad app create --display-name --available-to-other-tenants false --reply-urls http://localhost --native-app --required-resource-accesses "" +``` + +The output of the command is information about the app registration you've created. + +### Verify success + +You can confirm that the Azure Digital Twins permissions were granted by looking for the following fields in the output of the creation command, under `requiredResourceAccess`. Confirm their values match what's listed below. 
+* `resourceAccess > id` is *4589bd03-58cb-4e6c-b17f-b580e39652f8* +* `resourceAppId` is *0b07f429-9f4b-4714-9392-cc5e8e80c8b0* + +:::image type="content" source="media/how-to-create-app-registration/cli-required-resource-access.png" alt-text="Screenshot of Cloud Shell output of the app registration creation command."::: + +--- + +## Collect important values Next, collect some important values about the app registration that you'll need to use the app registration to authenticate a client application. These values include: -* resource name +* resource name — When working with Azure Digital Twins, the **resource name** is `http://digitaltwins.azure.net`. * client ID * tenant ID * client secret -To work with Azure Digital Twins, the resource name is `http://digitaltwins.azure.net`. - -The following sections describe how to find the other values. +The following sections describe how to find the remaining values. ### Collect client ID and tenant ID +To use the app registration for authentication, you may need to provide its **Application (client) ID** and **Directory (tenant) ID**. Here, you'll collect these values so you can save them and use them whenever they're needed. + +# [Portal](#tab/portal) + The client ID and tenant ID values can be collected from the app registration's details page in the Azure portal: :::image type="content" source="media/how-to-create-app-registration/client-id-tenant-id.png" alt-text="Screenshot of the Azure portal showing the important values for the app registration." lightbox="media/how-to-create-app-registration/client-id-tenant-id.png"::: Take note of the **Application (client) ID** and **Directory (tenant) ID** shown on your page. +# [CLI](#tab/cli) + +You can find both of these values in the output from the `az ad app create` command that you ran [earlier](#run-the-creation-command). (You can also bring up the app registration's information again using [az ad app show](/cli/azure/ad/app#az-ad-app-show).) + +Look for these values in the result: + +Application (client) ID: + +:::image type="content" source="media/how-to-create-app-registration/cli-app-id.png" alt-text="Screenshot of Cloud Shell output of the app registration creation command. The appId value is highlighted."::: + +Directory (tenant) ID: + +:::image type="content" source="media/how-to-create-app-registration/cli-tenant-id.png" alt-text="Screenshot of Cloud Shell output of the app registration creation command. The GUID value in the odata.metadata is highlighted."::: + +--- + ### Collect client secret -To set up a client secret for your app registration, start on your app registration page in the Azure portal. +Set up a client secret for your app registration, which other applications can use to authenticate through it. + +# [Portal](#tab/portal) + +Start on your app registration page in the Azure portal. 1. Select **Certificates & secrets** from the registration's menu, and then select **+ New client secret**. @@ -92,15 +179,42 @@ To set up a client secret for your app registration, start on your app registrat >[!IMPORTANT] >Make sure to copy the values now and store them in a safe place, as they can't be retrieved again. If you can't find them later, you'll have to create a new secret. +# [CLI](#tab/cli) + +To create a client secret for your app registration, you'll need your app registration's client ID value that you collected in the [previous step](#collect-client-id-and-tenant-id). 
Use the value in the following CLI command to create a new secret: + +```azurecli-interactive +az ad app credential reset --id --append +``` + +You can also add optional parameters to this command to specify a credential description, end date, and other details. For more information about the command and its parameters, see [az ad app credential reset documentation](/cli/azure/ad/app/credential#az-ad-app-credential-reset). + +The output of this command is information about the client secret that you've created. + +Copy the value for `password` to use when you need the client secret for authentication. + +:::image type="content" source="media/how-to-create-app-registration/cli-client-secret.png" alt-text="Screenshot of Cloud Shell output of the app registration creation command. The password value is highlighted."::: + +>[!IMPORTANT] +>Make sure to copy the value now and store it in a safe place, as it cannot be retrieved again. If you can't find the value later, you'll have to create a new secret. + +--- + ## Provide Azure Digital Twins permissions -Next, configure the app registration you've created with permissions to access Azure Digital Twins. First, you'll create a role assignment for the app registration within the Azure Digital Twins instance. Then, you'll provide API permissions for the app to read and write to the Azure Digital Twins APIs. +Next, configure the app registration you've created with permissions to access Azure Digital Twins. There are two types of permissions that are required: +* A role assignment for the app registration within the Azure Digital Twins instance +* API permissions for the app to read and write to the Azure Digital Twins APIs ### Create role assignment In this section, you'll create a role assignment for the app registration on the Azure Digital Twins instance. This role will determine what permissions the app registration holds on the instance, so you should select the role that matches the appropriate level of permission for your situation. One possible role is [Azure Digital Twins Data Owner](../role-based-access-control/built-in-roles.md#azure-digital-twins-data-owner). For a full list of roles and their descriptions, see [Azure built-in roles](../role-based-access-control/built-in-roles.md). -1. First, open the page for your Azure Digital Twins instance in the Azure portal. +# [Portal](#tab/portal) + +Use these steps to create the role assignment for your registration. + +1. Open the page for your Azure Digital Twins instance in the Azure portal. 1. Select **Access control (IAM)**. @@ -124,10 +238,28 @@ You can view the role assignment you've set up under **Access control (IAM) > Ro The app registration should show up in the list along with the role you assigned to it. +# [CLI](#tab/cli) + +Use the [az dt role-assignment create](/cli/azure/dt/role-assignment#az-dt-role-assignment-create) command to assign the role (it must be run by a user with [sufficient permissions](how-to-set-up-instance-cli.md#prerequisites-permission-requirements) in the Azure subscription). The command requires you to pass in the name of the role you want to assign, the name of your Azure Digital Twins instance, and either the name or the object ID of the app registration. + +```azurecli-interactive +az dt role-assignment create --dt-name --assignee "" --role "" +``` + +The result of this command is outputted information about the role assignment that's been created for the app registration. 
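+
+For reference, a filled-in version of this command might look like the following sketch. The instance name (*contoso-twins-instance*) and the app registration name (*contoso-twins-app*) are hypothetical placeholders, and *Azure Digital Twins Data Owner* is just one possible role choice.
+
+```azurecli-interactive
+# Hypothetical example: grant the app registration "contoso-twins-app" the
+# "Azure Digital Twins Data Owner" role on the instance "contoso-twins-instance".
+az dt role-assignment create --dt-name contoso-twins-instance --assignee "contoso-twins-app" --role "Azure Digital Twins Data Owner"
+```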
+ +To further verify the role assignment, you can look for it in the Azure portal (switch to the [Portal instruction tab](?tabs=portal#verify-role-assignment)). + +--- + ### Provide API permissions In this section, you'll grant your app baseline read/write permissions to the Azure Digital Twins APIs. +If you're using the Azure CLI and set up your app registration [earlier](#create-the-registration) with a manifest file, this step is already done. If you're using the Azure portal to create your app registration, continue through the rest of this section to set up API permissions. + +# [Portal](#tab/portal) + From the portal page for your app registration, select **API permissions** from the menu. On the following permissions page, select the **+ Add a permission** button. :::image type="content" source="media/how-to-create-app-registration/add-permission.png" alt-text="Screenshot of the app registration in the Azure portal, highlighting the 'API permissions' menu option and 'Add a permission' button."::: @@ -164,12 +296,22 @@ These values are shown in the screenshot below: If these values are missing, retry the steps in the [section for adding the API permission](#provide-api-permissions). +# [CLI](#tab/cli) + +If you're using the CLI, the API permissions were set up earlier as part of the [Create the registration](#create-the-registration) step. + +You can verify them now using the Azure portal (switch to the [Portal instruction tab](?tabs=portal#verify-api-permissions)). + +--- + ## Other possible steps for your organization -It's possible that your organization requires more actions from subscription Owners/administrators to successfully set up an app registration. The steps required may vary depending on your organization's specific settings. +It's possible that your organization requires more actions from subscription owners or administrators to finish setting up the app registration. The steps required may vary depending on your organization's specific settings. Choose a tab below to see this information tailored to your preferred interface. + +# [Portal](#tab/portal) -Here are some common potential activities that an Owner/administrator on the subscription may need to do. These and other operations can be performed from the [Azure AD App registrations](https://portal.azure.com/#blade/Microsoft_AAD_IAM/ActiveDirectoryMenuBlade/RegisteredApps) page in the Azure portal. -* Grant admin consent for the app registration. Your organization may have **Admin Consent Required** globally turned on in Azure AD for all app registrations within your subscription. If so, the Owner/administrator will need to select this button for your company on the app registration's **API permissions** page for the app registration to be valid: +Here are some common potential activities that an owner or administrator on the subscription may need to do. These and other operations can be performed from the [Azure AD App registrations](https://portal.azure.com/#blade/Microsoft_AAD_IAM/ActiveDirectoryMenuBlade/RegisteredApps) page in the Azure portal. +* Grant admin consent for the app registration. Your organization may have **Admin Consent Required** globally turned on in Azure AD for all app registrations within your subscription. 
If so, the owner/administrator will need to select this button for your company on the app registration's **API permissions** page for the app registration to be valid: :::image type="content" source="media/how-to-create-app-registration/grant-admin-consent.png" alt-text="Screenshot of the Azure portal showing the 'Grant admin consent' button under API permissions."::: - If consent was granted successfully, the entry for Azure Digital Twins should then show a **Status** value of **Granted for (your company)** @@ -179,6 +321,16 @@ Here are some common potential activities that an Owner/administrator on the sub * Set specific reply URLs for web and desktop access * Allow for implicit OAuth2 authentication flows +# [CLI](#tab/cli) + +Here are some common potential activities that an owner or administrator on the subscription may need to do. +* Grant admin consent for the app registration. Your organization may have **Admin Consent Required** globally turned on in Azure AD for all app registrations within your subscription. If so, the owner/administrator may need to grant additional delegated or application permissions. +* Activate public client access by appending `--set publicClient=true` to a create or update command for the registration. +* Set specific reply URLs for web and desktop access using the `--reply-urls` parameter. For more information on using this parameter with `az ad` commands, see the [az ad app documentation](/cli/azure/ad/app). +* Allow for implicit OAuth2 authentication flows using the `--oauth2-allow-implicit-flow` parameter. For more information on using this parameter with `az ad` commands, see the [az ad app documentation](/cli/azure/ad/app). + +--- + For more information about app registration and its different setup options, see [Register an application with the Microsoft identity platform](/graph/auth-register-app-v2). ## Next steps diff --git a/articles/digital-twins/how-to-move-regions.md b/articles/digital-twins/how-to-move-regions.md index e71e36a4fef2..8cc8f1aae024 100644 --- a/articles/digital-twins/how-to-move-regions.md +++ b/articles/digital-twins/how-to-move-regions.md @@ -126,7 +126,7 @@ The exact resources you need to edit depends on your scenario, but here are some * Azure Maps. * IoT Hub Device Provisioning Service. * Personal or company apps outside of Azure, such as the client app created in [Code a client app](tutorial-code.md), that connect to the instance and call Azure Digital Twins APIs. -* Azure AD app registrations don't need to be recreated. If you're using an [app registration](./how-to-create-app-registration-portal.md) to connect to the Azure Digital Twins APIs, you can reuse the same app registration with your new instance. +* Azure AD app registrations don't need to be recreated. If you're using an [app registration](./how-to-create-app-registration.md) to connect to the Azure Digital Twins APIs, you can reuse the same app registration with your new instance. After you finish this step, your new instance in the target region should be a copy of the original instance. diff --git a/articles/digital-twins/how-to-use-3d-scenes-studio.md b/articles/digital-twins/how-to-use-3d-scenes-studio.md index dfdb08ea17dd..31698eec313b 100644 --- a/articles/digital-twins/how-to-use-3d-scenes-studio.md +++ b/articles/digital-twins/how-to-use-3d-scenes-studio.md @@ -26,10 +26,10 @@ To use 3D Scenes Studio, you'll need the following resources: * An Azure Digital Twins instance. 
For instructions, see [Set up an instance and authentication](how-to-set-up-instance-cli.md). * Obtain *Azure Digital Twins Data Owner* or *Azure Digital Twins Data Reader* access to the instance. For instructions, see [Set up user access permissions](how-to-set-up-instance-cli.md#set-up-user-access-permissions). * Take note of the *host name* of your instance to use later. -* An Azure storage account. For instructions, see [Create a storage account](/azure/storage/common/storage-account-create?tabs=azure-portal). -* A private container in the storage account. For instructions, see [Create a container](/azure/storage/blobs/storage-quickstart-blobs-portal#create-a-container). +* An Azure storage account. For instructions, see [Create a storage account](../storage/common/storage-account-create.md?tabs=azure-portal). +* A private container in the storage account. For instructions, see [Create a container](../storage/blobs/storage-quickstart-blobs-portal.md#create-a-container). * Take note of the *URL* of your storage container to use later. -* *Storage Blob Data Owner* or *Storage Blob Data Contributor* access to your storage resources. You can grant required roles at either the storage account level or the container level. For instructions and more information about permissions to Azure storage, see [Assign an Azure role](/azure/storage/blobs/assign-azure-role-data-access?tabs=portal#assign-an-azure-role). +* *Storage Blob Data Owner* or *Storage Blob Data Contributor* access to your storage resources. You can grant required roles at either the storage account level or the container level. For instructions and more information about permissions to Azure storage, see [Assign an Azure role](../storage/blobs/assign-azure-role-data-access.md?tabs=portal#assign-an-azure-role). You should also configure [CORS](/rest/api/storageservices/cross-origin-resource-sharing--cors--support-for-the-azure-storage-services) for your storage account, so that 3D Scenes Studio will be able to access your storage container. You can use the following [Azure CLI](/cli/azure/what-is-azure-cli) command to set the minimum required methods, origins, and headers. The command contains one placeholder for the name of your storage account. @@ -374,4 +374,4 @@ When the recipient pastes this URL into their browser, the specified scene will Try out 3D Scenes Studio with a sample scenario in [Get started with 3D Scenes Studio](quickstart-3d-scenes-studio.md). -Or, visualize your Azure Digital Twins graph differently using [Azure Digital Twins Explorer](how-to-use-azure-digital-twins-explorer.md). +Or, visualize your Azure Digital Twins graph differently using [Azure Digital Twins Explorer](how-to-use-azure-digital-twins-explorer.md). 
\ No newline at end of file diff --git a/articles/digital-twins/media/how-to-create-app-registration/register-an-application.png b/articles/digital-twins/media/how-to-create-app-registration/register-an-application.png index 65c107cb3c51..c416f3c0b6f2 100644 Binary files a/articles/digital-twins/media/how-to-create-app-registration/register-an-application.png and b/articles/digital-twins/media/how-to-create-app-registration/register-an-application.png differ diff --git a/articles/digital-twins/media/how-to-set-up-instance/cloud-shell/cloud-shell-upload.png b/articles/digital-twins/media/how-to-set-up-instance/cloud-shell/cloud-shell-upload.png index e3404c3e3b69..27f78c5c29ec 100644 Binary files a/articles/digital-twins/media/how-to-set-up-instance/cloud-shell/cloud-shell-upload.png and b/articles/digital-twins/media/how-to-set-up-instance/cloud-shell/cloud-shell-upload.png differ diff --git a/articles/digital-twins/media/how-to-use-3d-scenes-studio/new-behavior-alerts-expression.png b/articles/digital-twins/media/how-to-use-3d-scenes-studio/new-behavior-alerts-expression.png index 73427178b997..8e7cdb29fbc6 100644 Binary files a/articles/digital-twins/media/how-to-use-3d-scenes-studio/new-behavior-alerts-expression.png and b/articles/digital-twins/media/how-to-use-3d-scenes-studio/new-behavior-alerts-expression.png differ diff --git a/articles/digital-twins/media/how-to-use-3d-scenes-studio/new-behavior-widgets-value-expression.png b/articles/digital-twins/media/how-to-use-3d-scenes-studio/new-behavior-widgets-value-expression.png index 2e3e8f264600..f5d9a4e70d49 100644 Binary files a/articles/digital-twins/media/how-to-use-3d-scenes-studio/new-behavior-widgets-value-expression.png and b/articles/digital-twins/media/how-to-use-3d-scenes-studio/new-behavior-widgets-value-expression.png differ diff --git a/articles/digital-twins/media/how-to-use-3d-scenes-studio/scene-view.png b/articles/digital-twins/media/how-to-use-3d-scenes-studio/scene-view.png index 87f788ae90e6..bfc303c7bae1 100644 Binary files a/articles/digital-twins/media/how-to-use-3d-scenes-studio/scene-view.png and b/articles/digital-twins/media/how-to-use-3d-scenes-studio/scene-view.png differ diff --git a/articles/digital-twins/media/how-to-use-3d-scenes-studio/share-scene.png b/articles/digital-twins/media/how-to-use-3d-scenes-studio/share-scene.png index 68e85ee4b6ba..925cb959026d 100644 Binary files a/articles/digital-twins/media/how-to-use-3d-scenes-studio/share-scene.png and b/articles/digital-twins/media/how-to-use-3d-scenes-studio/share-scene.png differ diff --git a/articles/digital-twins/media/how-to-use-3d-scenes-studio/theme.png b/articles/digital-twins/media/how-to-use-3d-scenes-studio/theme.png index 656461bf33a2..6ad7191021bd 100644 Binary files a/articles/digital-twins/media/how-to-use-3d-scenes-studio/theme.png and b/articles/digital-twins/media/how-to-use-3d-scenes-studio/theme.png differ diff --git a/articles/digital-twins/media/quickstart-3d-scenes-studio/distribution-scene-view-element-2.png b/articles/digital-twins/media/quickstart-3d-scenes-studio/distribution-scene-view-element-2.png index 8e97c61c007c..53ae361ee628 100644 Binary files a/articles/digital-twins/media/quickstart-3d-scenes-studio/distribution-scene-view-element-2.png and b/articles/digital-twins/media/quickstart-3d-scenes-studio/distribution-scene-view-element-2.png differ diff --git a/articles/digital-twins/media/quickstart-3d-scenes-studio/distribution-scene-view-element.png 
b/articles/digital-twins/media/quickstart-3d-scenes-studio/distribution-scene-view-element.png index a54d4f0af75c..955e6d3a8048 100644 Binary files a/articles/digital-twins/media/quickstart-3d-scenes-studio/distribution-scene-view-element.png and b/articles/digital-twins/media/quickstart-3d-scenes-studio/distribution-scene-view-element.png differ diff --git a/articles/digital-twins/media/quickstart-3d-scenes-studio/studio-full.png b/articles/digital-twins/media/quickstart-3d-scenes-studio/studio-full.png index fa271f3e6e43..f32d76e303c7 100644 Binary files a/articles/digital-twins/media/quickstart-3d-scenes-studio/studio-full.png and b/articles/digital-twins/media/quickstart-3d-scenes-studio/studio-full.png differ diff --git a/articles/digital-twins/quickstart-3d-scenes-studio.md b/articles/digital-twins/quickstart-3d-scenes-studio.md index e0a331193fa3..ed1cd46990e0 100644 --- a/articles/digital-twins/quickstart-3d-scenes-studio.md +++ b/articles/digital-twins/quickstart-3d-scenes-studio.md @@ -89,7 +89,7 @@ To see the models that have been uploaded and how they relate to each other, sel Next, create a new storage account and a container in the storage account. 3D Scenes Studio will use this storage container to store your 3D file and configuration information. -You'll also set up read and write permissions to the storage account. In order to set these backing resources up quickly, this section uses the [Azure Cloud Shell](/azure/cloud-shell/overview). +You'll also set up read and write permissions to the storage account. In order to set these backing resources up quickly, this section uses the [Azure Cloud Shell](../cloud-shell/overview.md). 1. Navigate to the [Cloud Shell](https://shell.azure.com) in your browser. @@ -314,4 +314,4 @@ You may also want to delete the downloaded sample 3D file from your local machin Next, continue on to the Azure Digital Twins tutorials to build out your own Azure Digital Twins environment. > [!div class="nextstepaction"] -> [Code a client app](tutorial-code.md) +> [Code a client app](tutorial-code.md) \ No newline at end of file diff --git a/articles/digital-twins/troubleshoot-error-403.md b/articles/digital-twins/troubleshoot-error-403.md index 98b8caf26379..499c04d20700 100644 --- a/articles/digital-twins/troubleshoot-error-403.md +++ b/articles/digital-twins/troubleshoot-error-403.md @@ -25,7 +25,7 @@ Most often, this error indicates that your Azure role-based access control (Azur ### Cause #2 -If you're using a client app to communicate with Azure Digital Twins that's authenticating with an [app registration](./how-to-create-app-registration-portal.md), this error may happen because your app registration doesn't have permissions set up for the Azure Digital Twins service. +If you're using a client app to communicate with Azure Digital Twins that's authenticating with an [app registration](./how-to-create-app-registration.md), this error may happen because your app registration doesn't have permissions set up for the Azure Digital Twins service. The app registration must have access permissions configured for the Azure Digital Twins APIs. Then, when your client app authenticates against the app registration, it will be granted the permissions that the app registration has configured. 
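+
+As a quick check from the command line, you can list the API permissions configured on the app registration and confirm that an entry for the Azure Digital Twins resource ID (`0b07f429-9f4b-4714-9392-cc5e8e80c8b0`) is present. This is only a sketch; the GUID shown below is a hypothetical placeholder for your registration's Application (client) ID.
+
+```azurecli-interactive
+# Hypothetical example: list the API permissions configured on an app registration.
+# Replace the GUID with your own registration's Application (client) ID.
+az ad app permission list --id 00000000-0000-0000-0000-000000000000
+```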
@@ -82,7 +82,7 @@ Next, select **API permissions** from the menu bar to verify that this app regis #### Fix issues -If any of this appears differently than described, follow the instructions on how to set up an app registration in [Create an app registration with Azure Digital Twins access](./how-to-create-app-registration-portal.md). +If any of this appears differently than described, follow the instructions on how to set up an app registration in [Create an app registration with Azure Digital Twins access](./how-to-create-app-registration.md). ## Next steps diff --git a/articles/dns/dns-delegate-domain-azure-dns.md b/articles/dns/dns-delegate-domain-azure-dns.md index a67ec78508aa..0dcd3d79499e 100644 --- a/articles/dns/dns-delegate-domain-azure-dns.md +++ b/articles/dns/dns-delegate-domain-azure-dns.md @@ -5,7 +5,7 @@ services: dns author: rohinkoul ms.service: dns ms.topic: tutorial -ms.date: 04/19/2021 +ms.date: 05/25/2022 ms.author: rohink #Customer intent: As an experienced network administrator, I want to configure Azure DNS, so I can host DNS zones. --- @@ -38,32 +38,32 @@ In this example, we'll reference the parent domain a `contoso.net`. 1. Go to the [Azure portal](https://portal.azure.com/) to create a DNS zone. Search for and select **DNS zones**. - ![DNS zone](./media/dns-delegate-domain-azure-dns/openzone650.png) +1. Select **+ Create**. -1. Select **Create DNS zone**. - -1. On the **Create DNS zone** page, enter the following values, and then select **Create**. For example, `contoso.net`. - - > [!NOTE] - > If the new zone that you are creating is a child zone (e.g. Parent zone = `contoso.net` Child zone = `child.contoso.net`), please refer to our [Creating a new Child DNS zone tutorial](./tutorial-public-dns-zones-child.md) +1. On the **Create DNS zone** page, enter the following values, and then select **Review + create**. | **Setting** | **Value** | **Details** | |--|--|--| - | **Resource group** | ContosoRG | Create a resource group. The resource group name must be unique within the subscription that you selected. The location of the resource group has no impact on the DNS zone. The DNS zone location is always "global," and isn't shown. | - | **Zone child** | leave unchecked | Since this zone is **not** a [child zone](./tutorial-public-dns-zones-child.md) you should leave this unchecked | - | **Name** | `contoso.net` | Field for your parent zone name | - | **Location** | East US | This field is based on the location selected as part of Resource group creation | + | **Resource group** | *ContosoRG* | Create a resource group. The resource group name must be unique within the subscription that you selected. The location of the resource group doesn't affect the DNS zone. The DNS zone location is always "global," and isn't shown. | + | **This zone is a child of an existing zone already hosted in Azure DNS** | leave unchecked | Leave this box unchecked since the DNS zone is **not** a [child zone](./tutorial-public-dns-zones-child.md). | + | **Name** | *contoso.net* | Enter your parent DNS zone name | + | **Resource group location** | *East US* | This field is based on the location selected as part of Resource group creation | +1. Select **Create**. + + + > [!NOTE] + > If the new zone that you are creating is a child zone (e.g. 
Parent zone = `contoso.net` Child zone = `child.contoso.net`), please refer to our [Creating a new Child DNS zone tutorial](./tutorial-public-dns-zones-child.md) ## Retrieve name servers Before you can delegate your DNS zone to Azure DNS, you need to know the name servers for your zone. Azure DNS gives name servers from a pool each time a zone is created. -1. With the DNS zone created, in the Azure portal **Favorites** pane, select **All resources**. On the **All resources** page, select your DNS zone. If the subscription you've selected already has several resources in it, you can enter your domain name in the **Filter by name** box to easily access the application gateway. +1. Select **Resource groups** in the left-hand menu, select the **ContosoRG** resource group, and then from the **Resources** list, select **contoso.net** DNS zone. -1. Retrieve the name servers from the DNS zone page. In this example, the zone `contoso.net` has been assigned name servers `ns1-01.azure-dns.com`, `ns2-01.azure-dns.net`, *`ns3-01.azure-dns.org`, and `ns4-01.azure-dns.info`: +1. Retrieve the name servers from the DNS zone page. In this example, the zone `contoso.net` has been assigned name servers `ns1-01.azure-dns.com`, `ns2-01.azure-dns.net`, `ns3-01.azure-dns.org`, and `ns4-01.azure-dns.info`: - ![List of name servers](./media/dns-delegate-domain-azure-dns/viewzonens500.png) + :::image type="content" source="./media/dns-delegate-domain-azure-dns/dns-name-servers.png" alt-text="Screenshot of D N S zone showing name servers" lightbox="./media/dns-delegate-domain-azure-dns/dns-name-servers.png"::: Azure DNS automatically creates authoritative NS records in your zone for the assigned name servers. @@ -110,9 +110,15 @@ You don't have to specify the Azure DNS name servers. If the delegation is set u ## Clean up resources -You can keep the **contosoRG** resource group if you intend to do the next tutorial. Otherwise, delete the **contosoRG** resource group to delete the resources created in this tutorial. +When no longer needed, you can delete all resources created in this tutorial by following these steps to delete the resource group **ContosoRG**: + +1. From the left-hand menu, select **Resource groups**. + +2. Select the **ContosoRG** resource group. + +3. Select **Delete resource group**. -Select the **contosoRG** resource group, and then select **Delete resource group**. +4. Enter **ContosoRG** and select **Delete**. ## Next steps diff --git a/articles/dns/dns-private-resolver-overview.md b/articles/dns/dns-private-resolver-overview.md index 9faafdf4d74c..a5343855c986 100644 --- a/articles/dns/dns-private-resolver-overview.md +++ b/articles/dns/dns-private-resolver-overview.md @@ -6,7 +6,7 @@ ms.custom: references_regions author: greg-lindsay ms.service: dns ms.topic: overview -ms.date: 05/10/2022 +ms.date: 05/25/2022 ms.author: greglin #Customer intent: As an administrator, I want to evaluate Azure DNS Private Resolver so I can determine if I want to use it instead of my current DNS resolver service. --- @@ -20,12 +20,12 @@ Azure DNS Private Resolver is a new service that enables you to query Azure DNS ## How does it work? -Azure DNS Private Resolver requires an [Azure Virtual Network](/azure/virtual-network/virtual-networks-overview). When you create an Azure DNS Private Resolver inside a virtual network, one or more [inbound endpoints](#inbound-endpoints) are established that can be used as the destination for DNS queries. 
The resolver's [outbound endpoint](#outbound-endpoints) processes DNS queries based on a [DNS forwarding ruleset](#dns-forwarding-rulesets) that you configure. DNS queries that are initiated in networks linked to a ruleset can be sent to other DNS servers. +Azure DNS Private Resolver requires an [Azure Virtual Network](../virtual-network/virtual-networks-overview.md). When you create an Azure DNS Private Resolver inside a virtual network, one or more [inbound endpoints](#inbound-endpoints) are established that can be used as the destination for DNS queries. The resolver's [outbound endpoint](#outbound-endpoints) processes DNS queries based on a [DNS forwarding ruleset](#dns-forwarding-rulesets) that you configure. DNS queries that are initiated in networks linked to a ruleset can be sent to other DNS servers. The DNS query process when using an Azure DNS Private Resolver is summarized below: 1. A client in a virtual network issues a DNS query. -2. If the DNS servers for this virtual network are [specified as custom](/azure/virtual-network/virtual-networks-name-resolution-for-vms-and-role-instances#specify-dns-servers), then the query is forwarded to the specified IP addresses. +2. If the DNS servers for this virtual network are [specified as custom](../virtual-network/virtual-networks-name-resolution-for-vms-and-role-instances.md#specify-dns-servers), then the query is forwarded to the specified IP addresses. 3. If Default (Azure-provided) DNS servers are configured in the virtual network, and there are Private DNS zones [linked to the same virtual network](private-dns-virtual-network-links.md), these zones are consulted. 4. If the query doesn't match a Private DNS zone linked to the virtual network, then [Virtual network links](#virtual-network-links) for [DNS forwarding rulesets](#dns-forwarding-rulesets) are consulted. 5. If no ruleset links are present, then Azure DNS is used to resolve the query. @@ -34,7 +34,7 @@ The DNS query process when using an Azure DNS Private Resolver is summarized bel 8. If multiple matches are present, the longest suffix is used. 9. If no match is found, no DNS forwarding occurs and Azure DNS is used to resolve the query. -The architecture for Azure DNS Private Resolver is summarized in the following figure. DNS resolution between Azure virtual networks and on-premises networks requires [Azure ExpressRoute](/azure/expressroute/expressroute-introduction) or a [VPN](/azure/vpn-gateway/vpn-gateway-about-vpngateways). +The architecture for Azure DNS Private Resolver is summarized in the following figure. DNS resolution between Azure virtual networks and on-premises networks requires [Azure ExpressRoute](../expressroute/expressroute-introduction.md) or a [VPN](../vpn-gateway/vpn-gateway-about-vpngateways.md). 
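+
+As a small illustration of step 2 above, pointing a virtual network at a custom DNS server (typically the private IP address of the resolver's inbound endpoint) takes a single CLI call. This is a sketch only; the resource group, virtual network name, and the inbound endpoint IP `10.10.0.4` are hypothetical values.
+
+```azurecli-interactive
+# Hypothetical example: use the resolver's inbound endpoint IP as the VNet's custom DNS server.
+az network vnet update --resource-group dns-resolver-rg --name client-vnet --dns-servers 10.10.0.4
+```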
[ ![Azure DNS Private Resolver architecture](./media/dns-resolver-overview/resolver-architecture.png) ](./media/dns-resolver-overview/resolver-architecture.png#lightbox) @@ -116,14 +116,8 @@ Subnets used for DNS resolver have the following limitations: Outbound endpoints have the following limitations: - An outbound endpoint can't be deleted unless the DNS forwarding ruleset and the virtual network links under it are deleted -### DNS forwarding ruleset restrictions - -DNS forwarding rulesets have the following limitations: -- A DNS forwarding ruleset can't be deleted unless the virtual network links under it are deleted - ### Other restrictions -- DNS resolver endpoints can't be updated to include IP configurations from a different subnet - IPv6 enabled subnets aren't supported in Public Preview @@ -131,4 +125,4 @@ DNS forwarding rulesets have the following limitations: * Learn how to create an Azure DNS Private Resolver by using [Azure PowerShell](./dns-private-resolver-get-started-powershell.md) or [Azure portal](./dns-private-resolver-get-started-portal.md). * Learn about some of the other key [networking capabilities](../networking/fundamentals/networking-overview.md) of Azure. -* [Learn module: Introduction to Azure DNS](/learn/modules/intro-to-azure-dns). +* [Learn module: Introduction to Azure DNS](/learn/modules/intro-to-azure-dns). \ No newline at end of file diff --git a/articles/dns/media/dns-delegate-domain-azure-dns/create-dns-zone-lb.png b/articles/dns/media/dns-delegate-domain-azure-dns/create-dns-zone-lb.png index ea3bb604bada..95c1401e758f 100644 Binary files a/articles/dns/media/dns-delegate-domain-azure-dns/create-dns-zone-lb.png and b/articles/dns/media/dns-delegate-domain-azure-dns/create-dns-zone-lb.png differ diff --git a/articles/dns/media/dns-delegate-domain-azure-dns/dns-name-servers.png b/articles/dns/media/dns-delegate-domain-azure-dns/dns-name-servers.png new file mode 100644 index 000000000000..d6216e9cb120 Binary files /dev/null and b/articles/dns/media/dns-delegate-domain-azure-dns/dns-name-servers.png differ diff --git a/articles/dns/media/dns-resolver-overview/resolver-architecture.png b/articles/dns/media/dns-resolver-overview/resolver-architecture.png index 167fcb9e3d6a..30519825435e 100644 Binary files a/articles/dns/media/dns-resolver-overview/resolver-architecture.png and b/articles/dns/media/dns-resolver-overview/resolver-architecture.png differ diff --git a/articles/event-grid/event-schema-blob-storage.md b/articles/event-grid/event-schema-blob-storage.md index a8a6178ab979..2d4387287a28 100644 --- a/articles/event-grid/event-schema-blob-storage.md +++ b/articles/event-grid/event-schema-blob-storage.md @@ -2,7 +2,7 @@ title: Azure Blob Storage as Event Grid source description: Describes the properties that are provided for blob storage events with Azure Event Grid ms.topic: conceptual -ms.date: 09/08/2021 +ms.date: 05/26/2022 --- # Azure Blob Storage as an Event Grid source @@ -45,7 +45,16 @@ These events are triggered if you enable a hierarchical namespace on the storage > [!NOTE] > For **Azure Data Lake Storage Gen2**, if you want to ensure that the **Microsoft.Storage.BlobCreated** event is triggered only when a Block Blob is completely committed, filter the event for the `FlushWithClose` REST API call. This API call triggers the **Microsoft.Storage.BlobCreated** event only after data is fully committed to a Block Blob. To learn how to create a filter, see [Filter events for Event Grid](./how-to-filter-events.md). 
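+
+For example, a minimal Azure CLI sketch of such a filter might look like the following. The subscription ID, resource group, storage account, and endpoint URL are hypothetical placeholders; the point of the example is the `data.api` advanced filter on `FlushWithClose`.
+
+```azurecli-interactive
+# Hypothetical example: only deliver BlobCreated events raised by the FlushWithClose API call.
+az eventgrid event-subscription create \
+    --name blob-flush-events \
+    --source-resource-id "/subscriptions/<subscription-id>/resourceGroups/<resource-group>/providers/Microsoft.Storage/storageAccounts/<storage-account>" \
+    --endpoint "<your-webhook-endpoint>" \
+    --included-event-types Microsoft.Storage.BlobCreated \
+    --advanced-filter data.api StringIn FlushWithClose
+```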
-## Example event +### List of policy-related events + +These events are triggered when the actions defined by a policy are performed. + + |Event name |Description| + |----------|-----------| + |**Microsoft.Storage.BlobInventoryPolicyCompleted** |Triggered when the inventory run completes for a rule that is defined an inventory policy . This event also occurs if the inventory run fails with a user error before it starts to run. For example, an invalid policy, or an error that occurs when a destination container is not present will trigger the event. | + |**Microsoft.Storage.LifecyclePolicyCompleted** |Triggered when the actions defined by a lifecycle management policy are performed. | + +## Example events When an event is triggered, the Event Grid service sends data about that event to subscribing endpoint. This section contains an example of what that data would look like for each blob storage event. # [Event Grid event schema](#tab/event-grid-event-schema) @@ -334,6 +343,61 @@ If the blob storage account has a hierarchical namespace, the data looks similar }] ``` +### Microsoft.Storage.BlobInventoryPolicyCompleted event + +```json +{ + "topic": "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/BlobInventory/providers/Microsoft.EventGrid/topics/BlobInventoryTopic", + "subject": "BlobDataManagement/BlobInventory", + "eventType": "Microsoft.Storage.BlobInventoryPolicyCompleted", + "id": "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx", + "data": { + "scheduleDateTime": "2021-05-28T03:50:27Z", + "accountName": "testaccount", + "ruleName": "Rule_1", + "policyRunStatus": "Succeeded", + "policyRunStatusMessage": "Inventory run succeeded, refer manifest file for inventory details.", + "policyRunId": "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx", + "manifestBlobUrl": "https://testaccount.blob.core.windows.net/inventory-destination-container/2021/05/26/13-25-36/Rule_1/Rule_1.csv" + }, + "dataVersion": "1.0", + "metadataVersion": "1", + "eventTime": "2021-05-28T15:03:18Z" +} +``` + +### Microsoft.Storage.LifecyclePolicyCompleted event + +```json +{ + "topic": "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/contosoresourcegroup/providers/Microsoft.Storage/storageAccounts/contosostorageaccount", + "subject": "BlobDataManagement/LifeCycleManagement/SummaryReport", + "eventType": "Microsoft.Storage.LifecyclePolicyCompleted", + "id": "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx", + "data": { + "scheduleTime": "2022/05/24 22:57:29.3260160", + "deleteSummary": { + "totalObjectsCount": 16, + "successCount": 14, + "errorList": "" + }, + "tierToCoolSummary": { + "totalObjectsCount": 0, + "successCount": 0, + "errorList": "" + }, + "tierToArchiveSummary": { + "totalObjectsCount": 0, + "successCount": 0, + "errorList": "" + } + }, + "dataVersion": "1", + "metadataVersion": "1", + "eventTime": "2022-05-26T00:00:40.1880331" +} +``` + # [Cloud event schema](#tab/cloud-event-schema) ### Microsoft.Storage.BlobCreated event @@ -554,7 +618,6 @@ If the blob storage account has a hierarchical namespace, the data looks similar --- - ## Event properties # [Event Grid event schema](#tab/event-grid-event-schema) diff --git a/articles/event-grid/receive-events.md b/articles/event-grid/receive-events.md index 30df54725e26..fd0bba9870e0 100644 --- a/articles/event-grid/receive-events.md +++ b/articles/event-grid/receive-events.md @@ -219,7 +219,7 @@ module.exports = function (context, req) { ### Test Blob Created event handling -Test the new functionality of the function by putting a [Blob storage 
event](./event-schema-blob-storage.md#example-event) into the test field and running: +Test the new functionality of the function by putting a [Blob storage event](./event-schema-blob-storage.md#example-events) into the test field and running: ```json [{ diff --git a/articles/expressroute/designing-for-disaster-recovery-with-expressroute-privatepeering.md b/articles/expressroute/designing-for-disaster-recovery-with-expressroute-privatepeering.md index c2ed6c4d7659..6cf51fea9fe3 100644 --- a/articles/expressroute/designing-for-disaster-recovery-with-expressroute-privatepeering.md +++ b/articles/expressroute/designing-for-disaster-recovery-with-expressroute-privatepeering.md @@ -5,7 +5,7 @@ services: expressroute author: duongau ms.service: expressroute ms.topic: article -ms.date: 05/09/2022 +ms.date: 05/25/2022 ms.author: duau --- @@ -21,14 +21,20 @@ However, taking Murphy's popular adage--*if anything can go wrong, it will*--int ## Need for redundant connectivity solution -There are possibilities and instances where an entire regional service (be it that of Microsoft, network service providers, customer, or other cloud service providers) gets degraded. The root cause for such regional wide service impact include natural calamity. That's why, for business continuity and mission critical applications it's important to plan for disaster recovery. +There are possibilities and instances where an ExpressRoute peering location or an entire regional service (be it that of Microsoft, network service providers, customer, or other cloud service providers) gets degraded. The root causes for such region-wide service impact include natural calamities. That's why, for business continuity and mission critical applications, it's important to plan for disaster recovery. No matter what, whether you run your mission critical applications in an Azure region or on-premises or anywhere else, you can use another Azure region as your failover site. The following articles addresses disaster recovery from applications and frontend access perspectives: - [Enterprise-scale disaster recovery][Enterprise DR] - [SMB disaster recovery with Azure Site Recovery][SMB DR] -If you rely on ExpressRoute connectivity between your on-premises network and Microsoft for mission critical operations, your disaster recovery plan should also include geo-redundant network connectivity. +If you rely on ExpressRoute connectivity between your on-premises network and Microsoft for mission critical operations, you need to consider the following when planning for disaster recovery over ExpressRoute: + +- using geo-redundant ExpressRoute circuits (a minimal sketch follows in the next section) +- using diverse service provider networks for the different ExpressRoute circuits +- designing each of the ExpressRoute circuits for [high availability][HA] +- terminating the different ExpressRoute circuits in different locations on the customer network +- using [Availability zone aware ExpressRoute Virtual Network Gateways](../vpn-gateway/about-zone-redundant-vnet-gateways.md) ## Challenges of using multiple ExpressRoute circuits @@ -36,14 +42,17 @@ When you interconnect the same set of networks using more than one connection, y However, if you load balance traffic across geo-redundant parallel paths, regardless of whether you have stateful entities or not, you would experience inconsistent network performance. These geo-redundant parallel paths can be through the same metro or different metro found on the [providers by location](expressroute-locations-providers.md#partners) page. 
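To make the geo-redundancy point above concrete, the following is a minimal Azure PowerShell sketch (not part of the original article) that provisions two ExpressRoute circuits in different peering locations. The resource names, service provider, peering locations, and bandwidth are illustrative assumptions only; substitute the values that match your own connectivity provider and locations.

```azurepowershell
# Sketch: two circuits in different peering locations for geo-redundancy.
# Provider, locations, and sizes below are assumptions for illustration.
New-AzExpressRouteCircuit -Name "er-primary" -ResourceGroupName "er-dr-rg" `
  -Location "West Europe" -SkuTier Standard -SkuFamily MeteredData `
  -ServiceProviderName "Equinix" -PeeringLocation "Amsterdam" -BandwidthInMbps 1000

New-AzExpressRouteCircuit -Name "er-secondary" -ResourceGroupName "er-dr-rg" `
  -Location "North Europe" -SkuTier Standard -SkuFamily MeteredData `
  -ServiceProviderName "Equinix" -PeeringLocation "Dublin" -BandwidthInMbps 1000
```

Each circuit should still be designed for high availability in its own right (both connections of each circuit terminated on separate customer edge devices), as the preceding list recommends.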
-### Same metro +### Redundancy with ExpressRoute circuits in same metro -[Many metros](expressroute-locations-providers.md#global-commercial-azure) have two ExpressRoute locations. An example would be *Amsterdam* and *Amsterdam2*. When designing redundancy, you could build two parallel paths to Azure with both locations in the same metro. The advantage of this design is when application failover happens, end-to-end latency between your on-premises applications and Microsoft stays approximately the same. However, if there is a natural disaster such as an earthquake, connectivity for both paths may no longer be available. +[Many metros](expressroute-locations-providers.md#global-commercial-azure) have two ExpressRoute locations. An example would be *Amsterdam* and *Amsterdam2*. When designing redundancy, you could build two parallel paths to Azure with both locations in the same metro. You could do this with the same provider or choose to work with a different service provider to improve resiliency. Another advantage of this design is that when application failover happens, end-to-end latency between your on-premises applications and Microsoft stays approximately the same. However, if there is a natural disaster such as an earthquake, connectivity for both paths may no longer be available. -### Different metros +### Redundancy with ExpressRoute circuits in different metros When using different metros for redundancy, you should select the secondary location in the same [geo-political region](expressroute-locations-providers.md#locations). To choose a location outside of the geo-political region, you'll need to use Premium SKU for both circuits in the parallel paths. The advantage of this configuration is the chances of a natural disaster causing an outage to both links are much lower but at the cost of increased latency end-to-end. +>[!NOTE] +>Enabling BFD on the ExpressRoute circuits will help with faster link failure detection between Microsoft Enterprise Edge (MSEE) devices and the Customer/Partner Edge routers. However, the overall failover and convergence to the redundant site may take up to 180 seconds under some failure conditions, and you may experience increased latency or performance degradation during this time. + In this article, let's discuss how to address challenges you may face when configuring geo-redundant paths. ## Small to medium on-premises network considerations @@ -52,13 +61,6 @@ Let's consider the example network illustrated in the following diagram. In the :::image type="content" source="./media/designing-for-disaster-recovery-with-expressroute-pvt/one-region.png" alt-text="Diagram of small to medium size on-premises network considerations."::: -When you are designing ExpressRoute connectivity for disaster recovery, you need to consider: - -- using geo-redundant ExpressRoute circuits -- using diverse service provider network(s) for different ExpressRoute circuit -- designing each of the ExpressRoute circuit for [high availability][HA] -- terminating the different ExpressRoute circuit in different location on the customer network - By default, if you advertise routes identically over all the ExpressRoute paths, Azure will load-balance on-premises bound traffic across all the ExpressRoute paths using Equal-cost multi-path (ECMP) routing. However, with the geo-redundant ExpressRoute circuits we need to take into consideration different network performances with different network paths (particularly for network latency). 
To get more consistent network performance during normal operation, you may want to prefer the ExpressRoute circuit that offers the minimal latency. @@ -134,7 +136,7 @@ The Scenario 2 is illustrated in the following diagram. In the diagram, green li The solution is illustrated in the following diagram. As illustrated, you can architect the scenario either using more specific route (Option 1) or AS-path prepend (Option 2) to influence VNet path selection. To influence on-premises network route selection for Azure bound traffic, you need configure the interconnection between the on-premises location as less preferable. How you configure the interconnection link as preferable depends on the routing protocol used within the on-premises network. You can use local preference with iBGP or metric with IGP (OSPF or IS-IS). -:::image type="content" source="./media/designing-for-disaster-recovery-with-expressroute-pvt/multi-region-arch2.png" alt-text="Diagram of active-active ExpressRoute circuits solution 2."::: +:::image type="content" source="./media/designing-for-disaster-recovery-with-expressroute-pvt/multi-region-sol2.png" alt-text="Diagram of active-active ExpressRoute circuits solution 2."::: > [!IMPORTANT] > When one or multiple ExpressRoute circuits are connected to multiple virtual networks, virtual network to virtual network traffic can route via ExpressRoute. However, this is not recommended. To enable virtual network to virtual network connectivity, [configure virtual network peering](../virtual-network/virtual-network-manage-peering.md). diff --git a/articles/expressroute/expressroute-faqs.md b/articles/expressroute/expressroute-faqs.md index cca8c8c78f3a..c015b47f32d8 100644 --- a/articles/expressroute/expressroute-faqs.md +++ b/articles/expressroute/expressroute-faqs.md @@ -441,7 +441,7 @@ Your existing circuit will continue advertising the prefixes for Microsoft 365. * Microsoft peering of ExpressRoute circuits that are configured on or after August 1, 2017 will not have any prefixes advertised until a route filter is attached to the circuit. You will see no prefixes by default. ### If I have multiple Virtual Networks (Vnets) connected to the same ExpressRoute circuit, can I use ExpressRoute for Vnet-to-Vnet connectivity? -Vnet-to-Vnet connectivity over ExpressRoute is not recommended. To acheive this, configure [Virtual Network Peering](https://docs.microsoft.com/azure/virtual-network/virtual-network-peering-overview?msclkid=b64a7b6ac19e11eca60d5e3e5d0764f5). +Vnet-to-Vnet connectivity over ExpressRoute is not recommended. To achieve this, configure [Virtual Network Peering](../virtual-network/virtual-network-peering-overview.md?msclkid=b64a7b6ac19e11eca60d5e3e5d0764f5). ## ExpressRoute Direct @@ -455,4 +455,4 @@ Vnet-to-Vnet connectivity over ExpressRoute is not recommended. To acheive this, ### Does the ExpressRoute service store customer data? -No. +No. 
\ No newline at end of file diff --git a/articles/expressroute/expressroute-locations-providers.md b/articles/expressroute/expressroute-locations-providers.md index 1758a948e9c4..6715021659c1 100644 --- a/articles/expressroute/expressroute-locations-providers.md +++ b/articles/expressroute/expressroute-locations-providers.md @@ -6,7 +6,7 @@ author: duongau ms.service: expressroute ms.topic: conceptual -ms.date: 01/24/2022 +ms.date: 05/24/2022 ms.author: duau ms.custom: references_regions @@ -109,7 +109,7 @@ The following table shows connectivity locations and the service providers for e | **Rio de Janeiro** | [Equinix-RJ2](https://www.equinix.com/locations/americas-colocation/brazil-colocation/rio-de-janeiro-data-centers/rj2/) | 3 | Brazil Southeast | Supported | Equinix | | **San Antonio** | [CyrusOne SA1](https://cyrusone.com/locations/texas/san-antonio-texas/) | 1 | South Central US | Supported | CenturyLink Cloud Connect, Megaport, Zayo | | **Sao Paulo** | [Equinix SP2](https://www.equinix.com/locations/americas-colocation/brazil-colocation/sao-paulo-data-centers/sp2/) | 3 | Brazil South | Supported | Aryaka Networks, Ascenty Data Centers, British Telecom, Equinix, InterCloud, Level 3 Communications, Neutrona Networks, Orange, Tata Communications, Telefonica, UOLDIVEO | -| **Sao Paulo2** | [TIVIT TSM](https://www.tivit.com/en/tivit/) | 3 | Brazil South | Supported | Ascenty Data Centers | +| **Sao Paulo2** | [TIVIT TSM](https://www.tivit.com/en/tivit/) | 3 | Brazil South | Supported | Ascenty Data Centers, Tivit | | **Seattle** | [Equinix SE2](https://www.equinix.com/locations/americas-colocation/united-states-colocation/seattle-data-centers/se2/) | 1 | West US 2 | Supported | Aryaka Networks, CenturyLink Cloud Connect, Equinix, Level 3 Communications, Megaport, Telus, Zayo | | **Seoul** | [KINX Gasan IDC](https://www.kinx.net/?lang=en) | 2 | Korea Central | Supported | KINX, KT, LG CNS, LGUplus, Equinix, Sejong Telecom, SK Telecom | | **Seoul2** | [KT IDC](https://www.kt-idc.com/eng/introduce/sub1_4_10.jsp#tab) | 2 | Korea Central | n/a | KT | diff --git a/articles/expressroute/expressroute-locations.md b/articles/expressroute/expressroute-locations.md index e40c60784522..b49b0ab2309b 100644 --- a/articles/expressroute/expressroute-locations.md +++ b/articles/expressroute/expressroute-locations.md @@ -6,7 +6,7 @@ author: duongau ms.service: expressroute ms.topic: conceptual ms.workload: infrastructure-services -ms.date: 01/31/2022 +ms.date: 05/24/2022 ms.author: duau ms.custom: references_regions @@ -159,6 +159,7 @@ The following table shows locations by service provider. 
If you want to view ava | **[Transtelco](https://transtelco.net/enterprise-services/)** |Supported |Supported | Dallas, Queretaro(Mexico)| | **[T-Mobile/Sprint](https://www.t-mobile.com/business/solutions/networking/cloud-networking )** |Supported |Supported | Chicago, Silicon Valley, Washington DC | | **[T-Systems](https://geschaeftskunden.telekom.de/vernetzung-digitalisierung/produkt/intraselect)** |Supported |Supported | Frankfurt | +| **[Tivit](https://www.tivit.com/cloud-solutions/public-cloud/public-cloud-azure/)** |Supported |Supported | Sao Paulo2 | | **[UOLDIVEO](https://www.uoldiveo.com.br/)** |Supported |Supported | Sao Paulo | | **[UIH](https://www.uih.co.th/en/network-solutions/global-network/cloud-direct-for-microsoft-azure-expressroute)** | Supported | Supported | Bangkok | | **[Verizon](https://enterprise.verizon.com/products/network/application-enablement/secure-cloud-interconnect/)** |Supported |Supported | Amsterdam, Chicago, Dallas, Hong Kong SAR, London, Mumbai, Silicon Valley, Singapore, Sydney, Tokyo, Toronto, Washington DC | diff --git a/articles/expressroute/media/designing-for-disaster-recovery-with-expressroute-pvt/multi-region-sol2.png b/articles/expressroute/media/designing-for-disaster-recovery-with-expressroute-pvt/multi-region-sol2.png index 3263c550229b..886c5a945f16 100644 Binary files a/articles/expressroute/media/designing-for-disaster-recovery-with-expressroute-pvt/multi-region-sol2.png and b/articles/expressroute/media/designing-for-disaster-recovery-with-expressroute-pvt/multi-region-sol2.png differ diff --git a/articles/firewall/firewall-preview.md b/articles/firewall/firewall-preview.md index 8d66e47b0e09..4934c9811b29 100644 --- a/articles/firewall/firewall-preview.md +++ b/articles/firewall/firewall-preview.md @@ -5,7 +5,7 @@ services: firewall author: vhorne ms.service: firewall ms.topic: conceptual -ms.date: 03/04/2022 +ms.date: 05/25/2022 ms.author: victorh --- @@ -61,7 +61,7 @@ Unregister-AzProviderFeature -FeatureName AFWEnableNetworkRuleNameLogging -Provi As more applications move to the cloud, the performance of the network elements can become a bottleneck. As the central piece of any network design, the firewall needs to support all the workloads. The Azure Firewall Premium performance boost feature allows more scalability for these deployments. -This feature significantly increases the throughput of Azure Firewall Premium. For more details, see [Azure Firewall performance](firewall-performance.md). +This feature significantly increases the throughput of Azure Firewall Premium. For more information, see [Azure Firewall performance](firewall-performance.md). To enable the Azure Firewall Premium Performance boost feature, run the following commands in Azure PowerShell. Stop and start the firewall for the feature to take effect immediately. Otherwise, the firewall/s is updated with the feature within several days. @@ -82,6 +82,53 @@ Run the following Azure PowerShell command to turn off this feature: Unregister-AzProviderFeature -FeatureName AFWEnableAccelnet -ProviderNamespace Microsoft.Network ``` +### IDPS Private IP ranges (preview) + +In Azure Firewall Premium IDPS, private IP address ranges are used to identify if traffic is inbound, outbound, or internal (East-West). Each signature is applied on specific traffic direction, as indicated in the signature rules table. By default, only ranges defined by IANA RFC 1918 are considered private IP addresses. 
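As a small addition to the commands above (not part of the original article), you can confirm where the registration stands before or after running them. This sketch assumes the same feature and provider namespace names used in the preceding commands.

```azurepowershell
# Check the current registration state of the performance boost feature.
Get-AzProviderFeature -FeatureName AFWEnableAccelnet -ProviderNamespace Microsoft.Network
```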
So traffic sent from a private IP address range to a private IP address range is considered internal. To modify your private IP addresses, you can now easily edit, remove, or add ranges as needed. + +:::image type="content" source="media/firewall-preview/idps-private-ip.png" alt-text="Screenshot showing I D P S private IP address ranges."::: + +### Structured firewall logs (preview) + +Today, the following diagnostic log categories are available for Azure Firewall: +- Application rule log +- Network rule log +- DNS proxy log + +These log categories use [Azure diagnostics mode](../azure-monitor/essentials/resource-logs.md#azure-diagnostics-mode). In this mode, all data from any diagnostic setting will be collected in the [AzureDiagnostics](/azure/azure-monitor/reference/tables/azurediagnostics) table. + +With this new feature, you'll be able to choose to use [Resource Specific Tables](../azure-monitor/essentials/resource-logs.md#resource-specific) instead of the existing [AzureDiagnostics](/azure/azure-monitor/reference/tables/azurediagnostics) table. If both sets of logs are required, at least two diagnostic settings need to be created per firewall. + +In **Resource specific** mode, individual tables in the selected workspace are created for each category selected in the diagnostic setting. This method is recommended since it: +- makes it much easier to work with the data in log queries +- makes it easier to discover schemas and their structure +- improves performance across both ingestion latency and query times +- allows you to grant Azure RBAC rights on a specific table + +New resource specific tables are now available in Diagnostic settings, allowing you to use the following newly added categories: + +- [Network rule log](/azure/azure-monitor/reference/tables/azfwnetworkrule) - Contains all Network Rule log data. Each match between data plane and network rule creates a log entry with the data plane packet and the matched rule's attributes. +- [NAT rule log](/azure/azure-monitor/reference/tables/azfwnatrule) - Contains all DNAT (Destination Network Address Translation) events log data. Each match between data plane and DNAT rule creates a log entry with the data plane packet and the matched rule's attributes. +- [Application rule log](/azure/azure-monitor/reference/tables/azfwapplicationrule) - Contains all Application rule log data. Each match between data plane and Application rule creates a log entry with the data plane packet and the matched rule's attributes. +- [Threat Intelligence log](/azure/azure-monitor/reference/tables/azfwthreatintel) - Contains all Threat Intelligence events. +- [IDPS log](/azure/azure-monitor/reference/tables/azfwidpssignature) - Contains all data plane packets that were matched with one or more IDPS signatures. +- [DNS proxy log](/azure/azure-monitor/reference/tables/azfwdnsquery) - Contains all DNS Proxy events log data. +- [Internal FQDN resolve failure log](/azure/azure-monitor/reference/tables/azfwinternalfqdnresolutionfailure) - Contains all internal Firewall FQDN resolution requests that resulted in failure. +- [Application rule aggregation log](/azure/azure-monitor/reference/tables/azfwapplicationruleaggregation) - Contains aggregated Application rule log data for Policy Analytics. +- [Network rule aggregation log](/azure/azure-monitor/reference/tables/azfwnetworkruleaggregation) - Contains aggregated Network rule log data for Policy Analytics. 
+- [NAT rule aggregation log](/azure/azure-monitor/reference/tables/azfwnatruleaggregation) - Contains aggregated NAT rule log data for Policy Analytics. + +By default, the new resource specific tables are disabled. Open a support ticket to enable the functionality in your environment. + +In addition, when setting up your log analytics workspace, you must select whether you want to work with the AzureDiagnostics table (default) or with Resource Specific Tables. + +Additional KQL log queries were added (as seen in the following screenshot) to query structured firewall logs. + +:::image type="content" source="media/firewall-preview/resource-specific-tables.png" alt-text="Screenshot showing Firewall logs Resource Specific Tables." lightbox="media/firewall-preview/resource-specific-tables-zoom.png"::: + +> [!NOTE] +> Existing Workbooks and any Sentinel integration will be adjusted to support the new structured logs when **Resource Specific** mode is selected. + ## Next steps To learn more about Azure Firewall, see [What is Azure Firewall?](overview.md). \ No newline at end of file diff --git a/articles/firewall/media/firewall-preview/idps-private-ip.png b/articles/firewall/media/firewall-preview/idps-private-ip.png new file mode 100644 index 000000000000..36dae79fb660 Binary files /dev/null and b/articles/firewall/media/firewall-preview/idps-private-ip.png differ diff --git a/articles/firewall/media/firewall-preview/resource-specific-tables-zoom.png b/articles/firewall/media/firewall-preview/resource-specific-tables-zoom.png new file mode 100644 index 000000000000..cfdba32c7882 Binary files /dev/null and b/articles/firewall/media/firewall-preview/resource-specific-tables-zoom.png differ diff --git a/articles/firewall/media/firewall-preview/resource-specific-tables.png b/articles/firewall/media/firewall-preview/resource-specific-tables.png new file mode 100644 index 000000000000..606bef1f2a40 Binary files /dev/null and b/articles/firewall/media/firewall-preview/resource-specific-tables.png differ diff --git a/articles/firewall/overview.md b/articles/firewall/overview.md index 47dd835cb6e0..7c2224a7f339 100644 --- a/articles/firewall/overview.md +++ b/articles/firewall/overview.md @@ -7,7 +7,7 @@ ms.service: firewall services: firewall ms.topic: overview ms.custom: mvc, contperf-fy21q1 -ms.date: 05/12/2022 +ms.date: 05/26/2022 # Customer intent: As an administrator, I want to evaluate Azure Firewall so I can determine if I want to use it. --- @@ -102,7 +102,6 @@ Azure Firewall Standard has the following known issues: | Error encountered when creating more than 2000 rule collections. | The maximal number of NAT/Application or Network rule collections is 2000 (Resource Manager limit). | This is a current limitation. | |Unable to see Network Rule Name in Azure Firewall Logs|Azure Firewall network rule log data does not show the Rule name for network traffic.|Network rule name logging is in preview. For for information, see [Azure Firewall preview features](firewall-preview.md#network-rule-name-logging-preview).| |XFF header in HTTP/S|XFF headers are overwritten with the original source IP address as seen by the firewall. This is applicable for the following use cases:
    - HTTP requests
    - HTTPS requests with TLS termination|A fix is being investigated.| -| Firewall logs (Resource specific tables - Preview) | Resource specific log queries are in preview mode and aren't currently supported. | A fix is being investigated.| |Can't upgrade to Premium with Availability Zones in the Southeast Asia region|You can't currently upgrade to Azure Firewall Premium with Availability Zones in the Southeast Asia region.|Deploy a new Premium firewall in Southeast Asia without Availability Zones, or deploy in a region that supports Availability Zones.| |Can’t deploy Firewall with Availability Zones with a newly created Public IP address|When you deploy a Firewall with Availability Zones, you can’t use a newly created Public IP address.|First create a new zone redundant Public IP address, then assign this previously created IP address during the Firewall deployment. diff --git a/articles/firewall/premium-features.md b/articles/firewall/premium-features.md index d8a8f6f3d975..4cb6435e959e 100644 --- a/articles/firewall/premium-features.md +++ b/articles/firewall/premium-features.md @@ -5,7 +5,7 @@ author: vhorne ms.service: firewall services: firewall ms.topic: conceptual -ms.date: 03/30/2022 +ms.date: 05/25/2022 ms.author: victorh ms.custom: references_regions --- @@ -56,7 +56,7 @@ To learn more about Azure Firewall Premium Intermediate CA certificate requireme A network intrusion detection and prevention system (IDPS) allows you to monitor your network for malicious activity, log information about this activity, report it, and optionally attempt to block it. -Azure Firewall Premium provides signature-based IDPS to allow rapid detection of attacks by looking for specific patterns, such as byte sequences in network traffic, or known malicious instruction sequences used by malware. The IDPS signatures are applicable for both application and network level traffic (Layers 4-7), they're fully managed, and continuously updated. IDPS can be applied to inbound, spoke-to-spoke (East-West), and outbound traffic. Spoke-to-spoke (East-West) includes traffic that goes from/to an on-premises network. +Azure Firewall Premium provides signature-based IDPS to allow rapid detection of attacks by looking for specific patterns, such as byte sequences in network traffic, or known malicious instruction sequences used by malware. The IDPS signatures are applicable for both application and network level traffic (Layers 4-7), they're fully managed, and continuously updated. IDPS can be applied to inbound, spoke-to-spoke (East-West), and outbound traffic. Spoke-to-spoke (East-West) includes traffic that goes from/to an on-premises network. You can configure your IDPS private IP address ranges using the **Private IP ranges** preview feature. For more information, see [Azure Firewall preview features](firewall-preview.md#idps-private-ip-ranges-preview). The Azure Firewall signatures/rulesets include: - An emphasis on fingerprinting actual malware, Command and Control, exploit kits, and in the wild malicious activity missed by traditional prevention methods. diff --git a/articles/frontdoor/standard-premium/concept-endpoint-manager.md b/articles/frontdoor/standard-premium/concept-endpoint-manager.md index 6d04fd9b4fe0..e983a83246eb 100644 --- a/articles/frontdoor/standard-premium/concept-endpoint-manager.md +++ b/articles/frontdoor/standard-premium/concept-endpoint-manager.md @@ -10,10 +10,10 @@ ms.date: 02/18/2021 ms.author: qixwang --- -# What is Azure Front Door Standard/Premium (Preview) Endpoint Manager? 
+# What is Azure Front Door Standard/Premium Endpoint Manager? > [!NOTE] -> * This documentation is for Azure Front Door Standard/Premium (Preview). Looking for information on Azure Front Door? View [Azure Front Door Docs](../front-door-overview.md). +> * This documentation is for Azure Front Door Standard/Premium. Looking for information on Azure Front Door? View [Azure Front Door Docs](../front-door-overview.md). Endpoint Manager provides an overview of endpoints you've configured for your Azure Front Door. An endpoint is a logical grouping of a domains and their associated configurations. Endpoint Manager helps you manage your collection of endpoints for CRUD (create, read, update, and delete) operation. You can manage the following elements for your endpoints through Endpoint Manager: @@ -26,11 +26,6 @@ Endpoint Manager provides an overview of endpoints you've configured for your Az Endpoint Manager list how many instances of each element are created within an endpoint. The association status for each element will also be displayed. For example, you may create multiple domains and origin groups, and assign the association between them with different routes. -> [!IMPORTANT] -> * Azure Front Door Standard/Premium (Preview) is currently in public preview. -> This preview version is provided without a service level agreement, and it's not recommended for production workloads. Certain features might not be supported or might have constrained capabilities. -> For more information, see [**Supplemental Terms of Use for Microsoft Azure Previews**](https://azure.microsoft.com/support/legal/preview-supplemental-terms/). - ## Linked view With the linked view within Endpoint Manager, you could easily identify the association between your Azure Front Door elements, such as: diff --git a/articles/frontdoor/standard-premium/how-to-configure-https-custom-domain.md b/articles/frontdoor/standard-premium/how-to-configure-https-custom-domain.md index bbca7cba28a9..de723e39d9c4 100644 --- a/articles/frontdoor/standard-premium/how-to-configure-https-custom-domain.md +++ b/articles/frontdoor/standard-premium/how-to-configure-https-custom-domain.md @@ -142,7 +142,7 @@ If you want to change the secret version from ‘Latest’ to a specified versio > [!NOTE] > * It may take up to an hour for the new certificate to be deployed when you switch between certificate types. - > * If your domain state is Approved, switching the certificate type between BYOC and managed certificate won't have any downtime. Whhen switching to managed certificate, unless the domain ownership is re-validated and the domain state becomes Approved, you will continue to be served by the previous certificate. + > * If your domain state is Approved, switching the certificate type between BYOC and managed certificate won't have any downtime. When switching to managed certificate, unless the domain ownership is re-validated and the domain state becomes Approved, you will continue to be served by the previous certificate. > * If you switch from BYOC to managed certificate, domain re-validation is required. If you switch from managed certificate to BYOC, you're not required to re-validate the domain. 
> diff --git a/articles/governance/management-groups/overview.md b/articles/governance/management-groups/overview.md index fc43eec5c7a7..283e91a67480 100644 --- a/articles/governance/management-groups/overview.md +++ b/articles/governance/management-groups/overview.md @@ -1,7 +1,7 @@ --- title: Organize your resources with management groups - Azure Governance description: Learn about the management groups, how their permissions work, and how to use them. -ms.date: 05/12/2022 +ms.date: 05/25/2022 ms.topic: overview author: timwarner-msft ms.author: timwarner @@ -314,6 +314,9 @@ management group. When looking to query on management groups outside the Azure portal, the target scope for management groups looks like **"/providers/Microsoft.Management/managementGroups/{_management-group-id_}"**. +> [!NOTE] +> Using the Azure Resource Manager REST API, you can enable diagnostic settings on a management group to send related Azure Activity log entries to a Log Analytics workspace, Azure Storage, or Azure Event Hub. For more information, see [Management Group Diagnostic Settings - Create Or Update](https://docs.microsoft.com/rest/api/monitor/management-group-diagnostic-settings/create-or-update). + ## Next steps To learn more about management groups, see: diff --git a/articles/governance/policy/concepts/exemption-structure.md b/articles/governance/policy/concepts/exemption-structure.md index eb4f62308761..42e2d95607ed 100644 --- a/articles/governance/policy/concepts/exemption-structure.md +++ b/articles/governance/policy/concepts/exemption-structure.md @@ -151,10 +151,10 @@ assignment. ## Next steps -- Study the [Microsoft.Authorization policyExemptions resource type](https://docs.microsoft.com/azure/templates/microsoft.authorization/policyexemptions?tabs=json). +- Study the [Microsoft.Authorization policyExemptions resource type](/azure/templates/microsoft.authorization/policyexemptions?tabs=json). - Learn about the [policy definition structure](./definition-structure.md). - Understand how to [programmatically create policies](../how-to/programmatically-create.md). - Learn how to [get compliance data](../how-to/get-compliance-data.md). - Learn how to [remediate non-compliant resources](../how-to/remediate-resources.md). - Review what a management group is with - [Organize your resources with Azure management groups](../../management-groups/overview.md). + [Organize your resources with Azure management groups](../../management-groups/overview.md). 
\ No newline at end of file diff --git a/articles/guides/operations/TOC.yml b/articles/guides/operations/TOC.yml index b6988793ebc6..d476921fb056 100644 --- a/articles/guides/operations/TOC.yml +++ b/articles/guides/operations/TOC.yml @@ -59,5 +59,5 @@ - name: Pricing calculator href: https://azure.microsoft.com/pricing/calculator/ - name: Microsoft Azure portal overview - href: /azure/azure-portal/azure-portal-overview - - name: References + href: ../../azure-portal/azure-portal-overview.md + - name: References \ No newline at end of file diff --git a/articles/hdinsight/hbase/hbase-troubleshoot-phoenix-no-data.md b/articles/hdinsight/hbase/hbase-troubleshoot-phoenix-no-data.md index fab08580d6bc..386e7d40531e 100644 --- a/articles/hdinsight/hbase/hbase-troubleshoot-phoenix-no-data.md +++ b/articles/hdinsight/hbase/hbase-troubleshoot-phoenix-no-data.md @@ -3,7 +3,7 @@ title: HDP upgrade & no data in Apache Phoenix views in Azure HDInsight description: HDP upgrade causes no data in Apache Phoenix views in Azure HDInsight ms.service: hdinsight ms.topic: troubleshooting -ms.date: 08/08/2019 +ms.date: 05/26/2022 --- # Scenario: HDP upgrade causes no data in Apache Phoenix views in Azure HDInsight @@ -30,4 +30,4 @@ If you didn't see your problem or are unable to solve your issue, visit one of t * Connect with [@AzureSupport](https://twitter.com/azuresupport) - the official Microsoft Azure account for improving customer experience. Connecting the Azure community to the right resources: answers, support, and experts. -* If you need more help, you can submit a support request from the [Azure portal](https://portal.azure.com/?#blade/Microsoft_Azure_Support/HelpAndSupportBlade/). Select **Support** from the menu bar or open the **Help + support** hub. For more detailed information, review [How to create an Azure support request](../../azure-portal/supportability/how-to-create-azure-support-request.md). Access to Subscription Management and billing support is included with your Microsoft Azure subscription, and Technical Support is provided through one of the [Azure Support Plans](https://azure.microsoft.com/support/plans/). \ No newline at end of file +* If you need more help, you can submit a support request from the [Azure portal](https://portal.azure.com/?#blade/Microsoft_Azure_Support/HelpAndSupportBlade/). Select **Support** from the menu bar or open the **Help + support** hub. For more detailed information, review [How to create an Azure support request](../../azure-portal/supportability/how-to-create-azure-support-request.md). Access to Subscription Management and billing support is included with your Microsoft Azure subscription, and Technical Support is provided through one of the [Azure Support Plans](https://azure.microsoft.com/support/plans/). 
diff --git a/articles/hdinsight/hdinsight-cluster-availability.md b/articles/hdinsight/hdinsight-cluster-availability.md index 783cb85fac0b..1bf57872d840 100644 --- a/articles/hdinsight/hdinsight-cluster-availability.md +++ b/articles/hdinsight/hdinsight-cluster-availability.md @@ -4,7 +4,7 @@ description: Learn how to use Apache Ambari to monitor cluster health and availa ms.service: hdinsight ms.topic: how-to ms.custom: hdinsightactive,seoapr2020 -ms.date: 05/01/2020 +ms.date: 05/26/2022 --- # How to monitor cluster availability with Apache Ambari in Azure HDInsight diff --git a/articles/hdinsight/hdinsight-release-notes-archive.md b/articles/hdinsight/hdinsight-release-notes-archive.md index 52c3be24e0d3..e3739330b25c 100644 --- a/articles/hdinsight/hdinsight-release-notes-archive.md +++ b/articles/hdinsight/hdinsight-release-notes-archive.md @@ -262,7 +262,7 @@ This release applies for both HDInsight 3.6 and HDInsight 4.0. HDInsight release HDInsight added Dav4-series support in this release. Learn more about [Dav4-series here](../virtual-machines/dav4-dasv4-series.md). #### Kafka REST Proxy GA -Kafka REST Proxy enables you to interact with your Kafka cluster via a REST API over HTTPS. Kafka Rest Proxy is general available starting from this release. Learn more about [Kafka REST Proxy here](./kafka/rest-proxy.md). +Kafka REST Proxy enables you to interact with your Kafka cluster via a REST API over HTTPS. Kafka REST Proxy is generally available starting from this release. Learn more about [Kafka REST Proxy here](./kafka/rest-proxy.md). #### Moving to Azure virtual machine scale sets HDInsight now uses Azure virtual machines to provision the cluster. The service is gradually migrating to [Azure virtual machine scale sets](../virtual-machine-scale-sets/overview.md). The entire process may take months. After your regions and subscriptions are migrated, newly created HDInsight clusters will run on virtual machine scale sets without customer actions. No breaking change is expected. diff --git a/articles/hdinsight/hortonworks-release-notes.md b/articles/hdinsight/hortonworks-release-notes.md index 65470ce570d1..ff0c0fff6d89 100644 --- a/articles/hdinsight/hortonworks-release-notes.md +++ b/articles/hdinsight/hortonworks-release-notes.md @@ -4,7 +4,7 @@ description: Learn the Apache Hadoop components and versions in Azure HDInsight.
ms.service: hdinsight ms.topic: conceptual ms.custom: seoapr2020 -ms.date: 04/22/2020 +ms.date: 05/26/2022 --- # Hortonworks release notes associated with HDInsight versions @@ -55,4 +55,4 @@ The section provides links to release notes for the Hortonworks Data Platform di [hdp-1-3-0]: https://docs.hortonworks.com/HDPDocuments/HDP1/HDP-1.3.0/bk_releasenotes_hdp_1.x/content/ch_relnotes-hdp1.3.0_1.html -[hdp-1-1-0]: https://docs.hortonworks.com/HDPDocuments/HDP1/HDP-1.3.0/bk_releasenotes_hdp_1.x/content/ch_relnotes-hdp1.1.1.16_1.html \ No newline at end of file +[hdp-1-1-0]: https://docs.hortonworks.com/HDPDocuments/HDP1/HDP-1.3.0/bk_releasenotes_hdp_1.x/content/ch_relnotes-hdp1.1.1.16_1.html diff --git a/articles/hdinsight/interactive-query/apache-hive-replication.md b/articles/hdinsight/interactive-query/apache-hive-replication.md index c091946a0ead..9c41ba4bd98c 100644 --- a/articles/hdinsight/interactive-query/apache-hive-replication.md +++ b/articles/hdinsight/interactive-query/apache-hive-replication.md @@ -3,7 +3,7 @@ title: How to use Apache Hive replication in Azure HDInsight clusters description: Learn how to use Hive replication in HDInsight clusters to replicate the Hive metastore and the Azure Data Lake Storage Gen 2 data lake. ms.service: hdinsight ms.topic: conceptual -ms.date: 10/08/2020 +ms.date: 05/26/2022 --- # How to use Apache Hive replication in Azure HDInsight clusters @@ -219,4 +219,4 @@ To learn more about the items discussed in this article, see: - [Azure HDInsight business continuity](../hdinsight-business-continuity.md) - [Azure HDInsight business continuity architectures](../hdinsight-business-continuity-architecture.md) - [Azure HDInsight highly available solution architecture case study](../hdinsight-high-availability-case-study.md) -- [What is Apache Hive and HiveQL on Azure HDInsight?](../hadoop/hdinsight-use-hive.md) \ No newline at end of file +- [What is Apache Hive and HiveQL on Azure HDInsight?](../hadoop/hdinsight-use-hive.md) diff --git a/articles/hdinsight/interactive-query/apache-hive-warehouse-connector-zeppelin.md b/articles/hdinsight/interactive-query/apache-hive-warehouse-connector-zeppelin.md index 5dedbdb6dd8a..b8ef7a19c607 100644 --- a/articles/hdinsight/interactive-query/apache-hive-warehouse-connector-zeppelin.md +++ b/articles/hdinsight/interactive-query/apache-hive-warehouse-connector-zeppelin.md @@ -5,7 +5,7 @@ author: nis-goel ms.author: nisgoel ms.service: hdinsight ms.topic: how-to -ms.date: 05/28/2020 +ms.date: 05/26/2022 --- # Integrate Apache Zeppelin with Hive Warehouse Connector in Azure HDInsight @@ -133,4 +133,4 @@ hive.executeQuery("select * from testers").show() * [HWC and Apache Spark operations](./apache-hive-warehouse-connector-operations.md) * [HWC integration with Apache Spark and Apache Hive](./apache-hive-warehouse-connector.md) -* [Use Interactive Query with HDInsight](./apache-interactive-query-get-started.md) \ No newline at end of file +* [Use Interactive Query with HDInsight](./apache-interactive-query-get-started.md) diff --git a/articles/hdinsight/interactive-query/interactive-query-troubleshoot-tez-view-slow.md b/articles/hdinsight/interactive-query/interactive-query-troubleshoot-tez-view-slow.md index 5cf711953741..0b0dcaaac292 100644 --- a/articles/hdinsight/interactive-query/interactive-query-troubleshoot-tez-view-slow.md +++ b/articles/hdinsight/interactive-query/interactive-query-troubleshoot-tez-view-slow.md @@ -3,7 +3,7 @@ title: Apache Ambari Tez View loads slowly in Azure HDInsight description: Apache 
Ambari Tez View may load slowly or may not load at all in Azure HDInsight ms.service: hdinsight ms.topic: troubleshooting -ms.date: 04/06/2020 +ms.date: 05/26/2022 --- # Scenario: Apache Ambari Tez View loads slowly in Azure HDInsight diff --git a/articles/hdinsight/interactive-query/llap-schedule-based-autoscale-best-practices.md b/articles/hdinsight/interactive-query/llap-schedule-based-autoscale-best-practices.md index cb3dd6abbfa4..c45beccd2304 100644 --- a/articles/hdinsight/interactive-query/llap-schedule-based-autoscale-best-practices.md +++ b/articles/hdinsight/interactive-query/llap-schedule-based-autoscale-best-practices.md @@ -31,7 +31,7 @@ Feature Supportability with HDInsight 4.0 Interactive Query(LLAP) Autoscale ### **Interactive Query Cluster setup for Autoscale** -1. [Create an HDInsight Interactive Query Cluster.](/azure/hdinsight/hdinsight-hadoop-provision-linux-clusters) +1. [Create an HDInsight Interactive Query Cluster.](../hdinsight-hadoop-provision-linux-clusters.md) 2. Post successful creation of cluster, navigate to **Azure Portal** and apply the recommended Script Action ``` @@ -49,7 +49,7 @@ Feature Supportability with HDInsight 4.0 Interactive Query(LLAP) Autoscale ``` -3. [Enable and Configure Schedule-Based Autoscale](/azure/hdinsight/hdinsight-autoscale-clusters#create-a-cluster-with-schedule-based-autoscaling) +3. [Enable and Configure Schedule-Based Autoscale](../hdinsight-autoscale-clusters.md#create-a-cluster-with-schedule-based-autoscaling) > [!NOTE] @@ -102,7 +102,7 @@ If the above guidelines didn't resolve your query, visit one of the following. * If you need more help, you can submit a support request from the [Azure portal](https://portal.azure.com/?#blade/Microsoft_Azure_Support/HelpAndSupportBlade/). Select **Support** from the menu bar or open the **Help + support** hub. For more detailed information, review [How to create an Azure support request](../../azure-portal/supportability/how-to-create-azure-support-request.md). Access to Subscription Management and billing support is included with your Microsoft Azure subscription, and Technical Support is provided through one of the [Azure Support Plans](https://azure.microsoft.com/support/plans/). 
## **Other References:** - * [Interactive Query in Azure HDInsight](/azure/hdinsight/interactive-query/apache-interactive-query-get-started) - * [Create a cluster with Schedule-based Autoscaling](/azure/hdinsight/interactive-query/apache-interactive-query-get-started) - * [Azure HDInsight Interactive Query Cluster (Hive LLAP) sizing guide](/azure/hdinsight/interactive-query/hive-llap-sizing-guide) - * [Hive Warehouse Connector in Azure HDInsight](/azure/hdinsight/interactive-query/apache-hive-warehouse-connector) + * [Interactive Query in Azure HDInsight](./apache-interactive-query-get-started.md) + * [Create a cluster with Schedule-based Autoscaling](./apache-interactive-query-get-started.md) + * [Azure HDInsight Interactive Query Cluster (Hive LLAP) sizing guide](./hive-llap-sizing-guide.md) + * [Hive Warehouse Connector in Azure HDInsight](./apache-hive-warehouse-connector.md) \ No newline at end of file diff --git a/articles/hdinsight/kafka/tutorial-cli-rest-proxy.md b/articles/hdinsight/kafka/tutorial-cli-rest-proxy.md index 6822090a7e14..0b835c16c85e 100644 --- a/articles/hdinsight/kafka/tutorial-cli-rest-proxy.md +++ b/articles/hdinsight/kafka/tutorial-cli-rest-proxy.md @@ -50,13 +50,13 @@ If you don't have an Azure subscription, create a [free account](https://azure.m |storageAccount|Replace STORAGEACCOUNTNAME with a name for your new storage account.| |httpPassword|Replace PASSWORD with a password for the cluster login, **admin**.| |sshPassword|Replace PASSWORD with a password for the secure shell username, **sshuser**.| - |securityGroupName|Replace SECURITYGROUPNAME with the client AAD security group name for Kafka Rest Proxy. The variable will be passed to the `--kafka-client-group-name` parameter for `az-hdinsight-create`.| - |securityGroupID|Replace SECURITYGROUPID with the client AAD security group ID for Kafka Rest Proxy. The variable will be passed to the `--kafka-client-group-id` parameter for `az-hdinsight-create`.| + |securityGroupName|Replace SECURITYGROUPNAME with the client AAD security group name for Kafka REST Proxy. The variable will be passed to the `--kafka-client-group-name` parameter for `az-hdinsight-create`.| + |securityGroupID|Replace SECURITYGROUPID with the client AAD security group ID for Kafka REST Proxy. The variable will be passed to the `--kafka-client-group-id` parameter for `az-hdinsight-create`.| |storageContainer|Storage container the cluster will use, leave as-is for this tutorial. This variable will be set with the name of the cluster.| |workernodeCount|Number of worker nodes in the cluster, leave as-is for this tutorial. To guarantee high availability, Kafka requires a minimum of 3 worker nodes| |clusterType|Type of HDInsight cluster, leave as-is for this tutorial.| - |clusterVersion|HDInsight cluster version, leave as-is for this tutorial. Kafka Rest Proxy requires a minimum cluster version of 4.0.| - |componentVersion|Kafka version, leave as-is for this tutorial. Kafka Rest Proxy requires a minimum component version of 2.1.| + |clusterVersion|HDInsight cluster version, leave as-is for this tutorial. Kafka REST Proxy requires a minimum cluster version of 4.0.| + |componentVersion|Kafka version, leave as-is for this tutorial. Kafka REST Proxy requires a minimum component version of 2.1.| Update the variables with desired values. Then enter the CLI commands to set the environment variables. 
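If you don't already have the AAD security group values for **securityGroupName** and **securityGroupID** at hand, one way to look them up is sketched below. This is not part of the original tutorial, and it uses Azure PowerShell rather than the Azure CLI used elsewhere in it; the group display name is a hypothetical placeholder.

```azurepowershell
# Look up the AAD security group intended for Kafka REST Proxy client access.
# "KafkaRestClients" is a hypothetical display name.
$group = Get-AzADGroup -DisplayName "KafkaRestClients"
$group.DisplayName   # value to use for securityGroupName
$group.Id            # value to use for securityGroupID
```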
@@ -130,8 +130,8 @@ If you don't have an Azure subscription, create a [free account](https://azure.m |Parameter | Description| |---|---| |--kafka-management-node-size|The size of the node. This tutorial uses the value **Standard_D4_v2**.| - |--kafka-client-group-id|The client AAD security group ID for Kafka Rest Proxy. The value is passed from the variable **$securityGroupID**.| - |--kafka-client-group-name|The client AAD security group name for Kafka Rest Proxy. The value is passed from the variable **$securityGroupName**.| + |--kafka-client-group-id|The client AAD security group ID for Kafka REST Proxy. The value is passed from the variable **$securityGroupID**.| + |--kafka-client-group-name|The client AAD security group name for Kafka REST Proxy. The value is passed from the variable **$securityGroupName**.| |--version|The HDInsight cluster version must be at least 4.0. The value is passed from the variable **$clusterVersion**.| |--component-version|The Kafka version must be at least 2.1. The value is passed from the variable **$componentVersion**.| diff --git a/articles/hdinsight/spark/apache-spark-improve-performance-iocache.md b/articles/hdinsight/spark/apache-spark-improve-performance-iocache.md index 59a345214042..c11bb0ee534e 100644 --- a/articles/hdinsight/spark/apache-spark-improve-performance-iocache.md +++ b/articles/hdinsight/spark/apache-spark-improve-performance-iocache.md @@ -3,7 +3,7 @@ title: Apache Spark performance - Azure HDInsight IO Cache (Preview) description: Learn about Azure HDInsight IO Cache and how to use it to improve Apache Spark performance. ms.service: hdinsight ms.topic: how-to -ms.date: 12/23/2019 +ms.date: 05/26/2022 --- # Improve performance of Apache Spark workloads using Azure HDInsight IO Cache diff --git a/articles/hdinsight/spark/apache-spark-streaming-high-availability.md b/articles/hdinsight/spark/apache-spark-streaming-high-availability.md index b518bd993265..c28dde4abf8b 100644 --- a/articles/hdinsight/spark/apache-spark-streaming-high-availability.md +++ b/articles/hdinsight/spark/apache-spark-streaming-high-availability.md @@ -4,7 +4,7 @@ description: How to set up Apache Spark Streaming for a high-availability scenar ms.service: hdinsight ms.topic: how-to ms.custom: hdinsightactive -ms.date: 11/29/2019 +ms.date: 05/26/2022 --- # Create high-availability Apache Spark Streaming jobs with YARN diff --git a/articles/hdinsight/spark/apache-spark-troubleshoot-job-slowness-container.md b/articles/hdinsight/spark/apache-spark-troubleshoot-job-slowness-container.md index 14b316aa4dc9..049d81ab719d 100644 --- a/articles/hdinsight/spark/apache-spark-troubleshoot-job-slowness-container.md +++ b/articles/hdinsight/spark/apache-spark-troubleshoot-job-slowness-container.md @@ -3,7 +3,7 @@ title: Apache Spark slow when Azure HDInsight storage has many files description: Apache Spark job runs slowly when the Azure storage container contains many files in Azure HDInsight ms.service: hdinsight ms.topic: troubleshooting -ms.date: 08/21/2019 +ms.date: 05/26/2022 --- # Apache Spark job run slowly when the Azure storage container contains many files in Azure HDInsight diff --git a/articles/hdinsight/spark/apache-spark-zeppelin-notebook.md b/articles/hdinsight/spark/apache-spark-zeppelin-notebook.md index aa6d8959f3c5..4a7e3de2d12e 100644 --- a/articles/hdinsight/spark/apache-spark-zeppelin-notebook.md +++ b/articles/hdinsight/spark/apache-spark-zeppelin-notebook.md @@ -165,6 +165,21 @@ Privileged domain users can use the `Shiro.ini` file to control access to the 
In /api/interpreter/** = authc, roles[adminGroupName] ``` +### Example shiro.ini for multiple domain groups: + + ``` + [main] + anyofrolesuser = org.apache.zeppelin.utils.AnyOfRolesUserAuthorizationFilter + + [roles] + group1 = * + group2 = * + group3 = * + + [urls] + /api/interpreter/** = authc, anyofrolesuser[group1, group2, group3] + ``` + ## Livy session management The first code paragraph in your Zeppelin notebook creates a new Livy session in your cluster. This session is shared across all Zeppelin notebooks that you later create. If the Livy session is killed for any reason, jobs won't run from the Zeppelin notebook. diff --git a/articles/healthcare-apis/authentication-authorization.md b/articles/healthcare-apis/authentication-authorization.md index c72ee004d92e..19c4c7ad23f3 100644 --- a/articles/healthcare-apis/authentication-authorization.md +++ b/articles/healthcare-apis/authentication-authorization.md @@ -102,7 +102,7 @@ You can use online tools such as [https://jwt.ms](https://jwt.ms/) to view the t **The access token is valid for one hour by default. You can obtain a new token or renew it using the refresh token before it expires.** -To obtain an access token, you can use tools such as Postman, the Rest Client extension in Visual Studio Code, PowerShell, CLI, curl, and the [Azure AD authentication libraries](../active-directory/develop/reference-v2-libraries.md). +To obtain an access token, you can use tools such as Postman, the REST Client extension in Visual Studio Code, PowerShell, CLI, curl, and the [Azure AD authentication libraries](../active-directory/develop/reference-v2-libraries.md). ## Encryption diff --git a/articles/healthcare-apis/azure-api-for-fhir/davinci-drug-formulary-tutorial.md b/articles/healthcare-apis/azure-api-for-fhir/davinci-drug-formulary-tutorial.md index 6b912afbf2ea..31fd3db915e9 100644 --- a/articles/healthcare-apis/azure-api-for-fhir/davinci-drug-formulary-tutorial.md +++ b/articles/healthcare-apis/azure-api-for-fhir/davinci-drug-formulary-tutorial.md @@ -6,8 +6,8 @@ ms.service: healthcare-apis ms.subservice: fhir ms.topic: tutorial ms.reviewer: matjazl -ms.author: cavoeg -author: modillon +ms.author: dseven +author: dougseven ms.date: 02/15/2022 --- diff --git a/articles/healthcare-apis/azure-api-for-fhir/get-started-with-azure-api-fhir.md b/articles/healthcare-apis/azure-api-for-fhir/get-started-with-azure-api-fhir.md index 5be1a4d4697d..7eb8d0c81452 100644 --- a/articles/healthcare-apis/azure-api-for-fhir/get-started-with-azure-api-fhir.md +++ b/articles/healthcare-apis/azure-api-for-fhir/get-started-with-azure-api-fhir.md @@ -34,7 +34,7 @@ Refer to the steps in the [Quickstart guide](fhir-paas-portal-quickstart.md) for ## Accessing Azure API for FHIR -When you're working with healthcare data, it's important to ensure that the data is secure, and it can't be accessed by unauthorized users or applications. FHIR servers use [OAuth 2.0](https://oauth.net/2/) to ensure this data security. Azure API for FHIR is secured using [Azure Active Directory (Azure AD)](https://docs.microsoft.com/azure/active-directory/), which is an example of an OAuth 2.0 identity provider. [Azure AD identity configuration for Azure API for FHIR](././../azure-api-for-fhir/azure-active-directory-identity-configuration.md) provides an overview of FHIR server authorization, and the steps needed to obtain a token to access a FHIR server. 
While these steps apply to any FHIR server and any identity provider, this article will walk you through Azure API for FHIR as the FHIR server and Azure AD as our identity provider. For more information about accessing Azure API for FHIR, see [Access control overview](././../azure-api-for-fhir/azure-active-directory-identity-configuration.md#access-control-overview). +When you're working with healthcare data, it's important to ensure that the data is secure, and it can't be accessed by unauthorized users or applications. FHIR servers use [OAuth 2.0](https://oauth.net/2/) to ensure this data security. Azure API for FHIR is secured using [Azure Active Directory (Azure AD)](../../active-directory/index.yml), which is an example of an OAuth 2.0 identity provider. [Azure AD identity configuration for Azure API for FHIR](././../azure-api-for-fhir/azure-active-directory-identity-configuration.md) provides an overview of FHIR server authorization, and the steps needed to obtain a token to access a FHIR server. While these steps apply to any FHIR server and any identity provider, this article will walk you through Azure API for FHIR as the FHIR server and Azure AD as our identity provider. For more information about accessing Azure API for FHIR, see [Access control overview](././../azure-api-for-fhir/azure-active-directory-identity-configuration.md#access-control-overview). ### Access token validation @@ -51,7 +51,7 @@ For more information about the two kinds of application registrations, see [Regi ## Configure Azure RBAC for FHIR -The article [Configure Azure RBAC for FHIR](configure-azure-rbac.md), describes how to use [Azure role-based access control (Azure RBAC)](https://docs.microsoft.com/azure/role-based-access-control/) to assign access to the Azure API for FHIR data plane. Azure RBAC is the preferred method for assigning data plane access when data plane users are managed in the Azure AD tenant associated with your Azure subscription. If you're using an external Azure AD tenant, refer to the [local RBAC assignment reference](configure-local-rbac.md). +The article [Configure Azure RBAC for FHIR](configure-azure-rbac.md), describes how to use [Azure role-based access control (Azure RBAC)](../../role-based-access-control/index.yml) to assign access to the Azure API for FHIR data plane. Azure RBAC is the preferred method for assigning data plane access when data plane users are managed in the Azure AD tenant associated with your Azure subscription. If you're using an external Azure AD tenant, refer to the [local RBAC assignment reference](configure-local-rbac.md). ## Next steps @@ -61,7 +61,4 @@ This article described the basic steps to get started using Azure API for FHIR. >[What is Azure API for FHIR?](overview.md) >[!div class="nextstepaction"] ->[Frequently asked questions about Azure API for FHIR](fhir-faq.yml) - - - +>[Frequently asked questions about Azure API for FHIR](fhir-faq.yml) \ No newline at end of file diff --git a/articles/healthcare-apis/fhir/import-data.md b/articles/healthcare-apis/fhir/import-data.md index 4b681c42b96b..e4194eca09d2 100644 --- a/articles/healthcare-apis/fhir/import-data.md +++ b/articles/healthcare-apis/fhir/import-data.md @@ -219,7 +219,7 @@ Below are some error codes you may encounter and the solutions to help you resol **Cause:** We use managed identity for source storage auth. This error may be caused by a missing or wrong role assignment. 
-**Solution:** Assign _Storage Blob Data Contributor_ role to the FHIR server following [the RBAC guide.](https://docs.microsoft.com/azure/role-based-access-control/role-assignments-portal?tabs=current) +**Solution:** Assign _Storage Blob Data Contributor_ role to the FHIR server following [the RBAC guide.](../../role-based-access-control/role-assignments-portal.md?tabs=current) ### 500 Internal Server Error @@ -263,4 +263,4 @@ In this article, you've learned about how the Bulk import feature enables import >[Configure export settings and set up a storage account](configure-export-data.md) >[!div class="nextstepaction"] ->[Copy data from Azure API for FHIR to Azure Synapse Analytics](copy-to-synapse.md) +>[Copy data from Azure API for FHIR to Azure Synapse Analytics](copy-to-synapse.md) \ No newline at end of file diff --git a/articles/healthcare-apis/fhir/using-curl.md b/articles/healthcare-apis/fhir/using-curl.md index 473b8f8ceeeb..fe05b4f42418 100644 --- a/articles/healthcare-apis/fhir/using-curl.md +++ b/articles/healthcare-apis/fhir/using-curl.md @@ -19,7 +19,7 @@ In this article, you'll learn how to access Azure Health Data Services with cURL * An Azure account with an active subscription. [Create one for free](https://azure.microsoft.com/free/). * If you want to run the code locally, install [PowerShell](/powershell/module/powershellget/) and [Azure Az PowerShell](/powershell/azure/install-az-ps). -* Optionally, you can run the scripts in Visual Studio Code with the Rest Client extension. For more information, see [Make a link to the Rest Client doc](using-rest-client.md). +* Optionally, you can run the scripts in Visual Studio Code with the REST Client extension. For more information, see [Accessing Azure Health Data Services using the REST Client extension](using-rest-client.md). * Download and install [cURL](https://curl.se/download.html). ### CLI * An Azure account with an active subscription. [Create one for free](https://azure.microsoft.com/free/). * If you want to run the code locally, install [Azure CLI](/cli/azure/install-azure-cli). * Optionally, install a Bash shell, such as Git Bash, which it's included in [Git for Windows](https://gitforwindows.org/). -* Optionally, run the scripts in Visual Studio Code with the Rest Client extension. For more information, see [Make a link to the Rest Client doc](using-rest-client.md). +* Optionally, run the scripts in Visual Studio Code with the REST Client extension. For more information, see [Accessing Azure Health Data Services using the REST Client extension](using-rest-client.md). * Download and install [cURL](https://curl.se/download.html). 
## Obtain Azure Access Token diff --git a/articles/healthcare-apis/get-access-token.md b/articles/healthcare-apis/get-access-token.md index e1f01a673ddb..1e10326c6d26 100644 --- a/articles/healthcare-apis/get-access-token.md +++ b/articles/healthcare-apis/get-access-token.md @@ -65,7 +65,7 @@ In this article, you learned how to obtain an access token for the FHIR service >[Access FHIR service using Postman](./fhir/use-postman.md) >[!div class="nextstepaction"] ->[Access FHIR service using Rest Client](./fhir/using-rest-client.md) +>[Access FHIR service using REST Client](./fhir/using-rest-client.md) >[!div class="nextstepaction"] >[Access DICOM service using cURL](dicom/dicomweb-standard-apis-curl.md) diff --git a/articles/healthcare-apis/iot/iot-git-projects.md b/articles/healthcare-apis/iot/iot-git-projects.md index 8f9647f4c6b0..7bb44f8da85d 100644 --- a/articles/healthcare-apis/iot/iot-git-projects.md +++ b/articles/healthcare-apis/iot/iot-git-projects.md @@ -5,7 +5,7 @@ services: healthcare-apis author: msjasteppe ms.service: healthcare-apis ms.topic: reference -ms.date: 02/16/2022 +ms.date: 05/25/2022 ms.author: jasteppe --- # Open-source projects @@ -34,9 +34,9 @@ HealthKit * [microsoft/healthkit-to-fhir](https://github.com/microsoft/healthkit-to-fhir): Provides a simple way to create FHIR Resources from HKObjects -Google Fit on FHIR +Fit on FHIR -* [microsoft/googlefit-on-fhir](https://github.com/microsoft/googlefit-on-fhir): Bring Google Fit® data to a FHIR service. +* [microsoft/fit-on-fhir](https://github.com/microsoft/fit-on-fhir): Bring Google Fit® data to a FHIR service. Health Data Sync diff --git a/articles/healthcare-apis/register-application.md b/articles/healthcare-apis/register-application.md index 352de3675ac5..5505bc48076a 100644 --- a/articles/healthcare-apis/register-application.md +++ b/articles/healthcare-apis/register-application.md @@ -88,7 +88,7 @@ The following steps are required for the DICOM service, but optional for the FHI [ ![Select permissions scopes.](dicom/media/dicom-select-scopes.png) ](dicom/media/dicom-select-scopes.png#lightbox) >[!NOTE] ->Use grant_type of client_credentials when trying to otain an access token for the FHIR service using tools such as Postman or Rest Client. For more details, visit [Access using Postman](./fhir/use-postman.md) and [Accessing Azure Health Data Services using the REST Client Extension in Visual Studio Code](./fhir/using-rest-client.md). +>Use grant_type of client_credentials when trying to obtain an access token for the FHIR service using tools such as Postman or REST Client. For more details, visit [Access using Postman](./fhir/use-postman.md) and [Accessing Azure Health Data Services using the REST Client Extension in Visual Studio Code](./fhir/using-rest-client.md). >>Use grant_type of client_credentials or authentication_doe when trying to obtain an access token for the DICOM service. For more details, visit [Using DICOM with cURL](dicom/dicomweb-standard-apis-curl.md). Your application registration is now complete.
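To make the client_credentials flow in that note concrete, here is a minimal JavaScript sketch of the same token request followed by a FHIR call. It isn't part of the articles changed above: the tenant ID, client ID, client secret, and FHIR service URL are placeholders, the `.default` scope format is an assumption based on the usual Azure AD v2.0 convention, and the global `fetch` call assumes Node.js 18 or later (or an equivalent polyfill).

```javascript
// Hypothetical values for illustration only; replace with your own registration details.
const tenantId = "<tenant-id>";
const clientId = "<client-id>";
const clientSecret = "<client-secret>";
const fhirUrl = "https://<workspace>-<fhir-service>.fhir.azurehealthcareapis.com";

async function getTokenAndCallFhir() {
  // Request a token from the Azure AD v2.0 endpoint using the client_credentials grant.
  const tokenResponse = await fetch(`https://login.microsoftonline.com/${tenantId}/oauth2/v2.0/token`, {
    method: "POST",
    headers: { "Content-Type": "application/x-www-form-urlencoded" },
    body: new URLSearchParams({
      grant_type: "client_credentials",
      client_id: clientId,
      client_secret: clientSecret,
      scope: `${fhirUrl}/.default`, // assumed scope format for the FHIR service
    }),
  });
  const { access_token } = await tokenResponse.json();

  // Call the FHIR service with the token as a bearer token.
  const patients = await fetch(`${fhirUrl}/Patient`, {
    headers: { Authorization: `Bearer ${access_token}` },
  });
  console.log(await patients.json());
}

getTokenAndCallFhir().catch(console.error);
```

Because client_credentials identifies the application rather than a signed-in user, the resulting token carries only the permissions granted to the app registration, which is why the docs pair it with non-interactive tools such as Postman, the REST Client extension, and cURL.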
diff --git a/articles/hpc-cache/access-policies.md b/articles/hpc-cache/access-policies.md index 9b50b948d723..c9b1ec2a14a8 100644 --- a/articles/hpc-cache/access-policies.md +++ b/articles/hpc-cache/access-policies.md @@ -1,11 +1,11 @@ --- title: Use access policies in Azure HPC Cache description: How to create and apply custom access policies to limit client access to storage targets in Azure HPC Cache -author: femila +author: ekpgh ms.service: hpc-cache ms.topic: how-to -ms.date: 03/11/2021 -ms.author: femila +ms.date: 05/19/2022 +ms.author: v-erinkelly --- # Control client access @@ -22,7 +22,7 @@ If you don't need fine-grained control over storage target access, you can use t ## Create a client access policy -Use the **Client access policies** page in the Azure portal to create and manage policies. +Use the **Client access policies** page in the Azure portal to create and manage policies. [![screenshot of client access policies page. Several policies are defined, and some are expanded to show their rules](media/policies-overview.png)](media/policies-overview.png#lightbox) @@ -76,11 +76,11 @@ Check this box to allow the specified clients to directly mount this export's su Choose whether or not to set root squash for clients that match this rule. -This setting controls how Azure HPC Cache treats requests from the root user on client machines. When root squash is enabled, root users from a client are automatically mapped to a non-privileged user when they send requests through the Azure HPC Cache. It also prevents client requests from using set-UID permission bits. +This setting controls how Azure HPC Cache treats requests from the root user on client machines. When root squash is enabled, root users from a client are automatically mapped to a non-privileged user when they send requests through the Azure HPC Cache. It also prevents client requests from using set-UID permission bits. If root squash is disabled, a request from the client root user (UID 0) is passed through to a back-end NFS storage system as root. This configuration might allow inappropriate file access. -Setting root squash for client requests can help compensate for the required ``no_root_squash`` setting on NAS systems that are used as storage targets. (Read more about [NFS storage target prerequisites](hpc-cache-prerequisites.md#nfs-storage-requirements).) It also can improve security when used with Azure Blob storage targets. +Setting root squash for client requests can provide extra security for your storage target back-end systems. This might be important if you use a NAS system that is configured with ``no_root_squash`` as a storage target. (Read more about [NFS storage target prerequisites](hpc-cache-prerequisites.md#nfs-storage-requirements).) If you turn on root squash, you must also set the anonymous ID user value. The portal accepts integer values between 0 and 4294967295. (The old values -2 and -1 are supported for backward compatibility, but not recommended for new configurations.) diff --git a/articles/hpc-cache/configuration.md b/articles/hpc-cache/configuration.md index 9f0c3bbd7b7f..81f7642da026 100644 --- a/articles/hpc-cache/configuration.md +++ b/articles/hpc-cache/configuration.md @@ -1,11 +1,11 @@ --- title: Configure Azure HPC Cache settings description: Explains how to configure additional settings for the cache like MTU, custom NTP and DNS configuration, and how to access the express snapshots from Azure Blob storage targets. 
-author: ronhogue +author: ekpgh ms.service: hpc-cache ms.topic: how-to -ms.date: 04/08/2021 -ms.author: rohogue +ms.date: 05/16/2022 +ms.author: v-erinkelly --- # Configure additional Azure HPC Cache settings @@ -18,9 +18,6 @@ To see the settings, open the cache's **Networking** page in the Azure portal. ![screenshot of networking page in Azure portal](media/networking-page.png) -> [!NOTE] -> A previous version of this page included a cache-level root squash setting, but this setting has moved to [client access policies](access-policies.md). - diff --git a/articles/hpc-cache/hpc-cache-prerequisites.md b/articles/hpc-cache/hpc-cache-prerequisites.md index bf3671fe525f..3897ec7607d1 100644 --- a/articles/hpc-cache/hpc-cache-prerequisites.md +++ b/articles/hpc-cache/hpc-cache-prerequisites.md @@ -230,15 +230,7 @@ More information is included in [Troubleshoot NAS configuration and NFS storage * Check firewall settings to be sure that they allow traffic on all of these required ports. Be sure to check firewalls used in Azure as well as on-premises firewalls in your data center. -* Root access (read/write): The cache connects to the back-end system as user ID 0. Check these settings on your storage system: - - * Enable `no_root_squash`. This option ensures that the remote root user can access files owned by root. - - * Check export policies to make sure they don't include restrictions on root access from the cache's subnet. - - * If your storage has any exports that are subdirectories of another export, make sure the cache has root access to the lowest segment of the path. Read [Root access on directory paths](troubleshoot-nas.md#allow-root-access-on-directory-paths) in the NFS storage target troubleshooting article for details. - -* NFS back-end storage must be a compatible hardware/software platform. The storage must support NFS Version 3 (NFSv3). Contact the Azure HPC Cache team for more details. +* NFS back-end storage must be a compatible hardware/software platform. The storage must support NFS Version 3 (NFSv3). Contact the Azure HPC Cache team for details. ### NFS-mounted blob (ADLS-NFS) storage requirements diff --git a/articles/hpc-cache/troubleshoot-nas.md b/articles/hpc-cache/troubleshoot-nas.md index 56b2c64bac3a..2ffba35b0681 100644 --- a/articles/hpc-cache/troubleshoot-nas.md +++ b/articles/hpc-cache/troubleshoot-nas.md @@ -1,18 +1,18 @@ --- title: Troubleshoot Azure HPC Cache NFS storage targets description: Tips to avoid and fix configuration errors and other problems that can cause failure when creating an NFS storage target -author: femila +author: ekpgh ms.service: hpc-cache ms.topic: troubleshooting -ms.date: 03/18/2020 -ms.author: femila +ms.date: 05/26/2022 +ms.author: v-erinkelly --- # Troubleshoot NAS configuration and NFS storage target issues This article gives solutions for some common configuration errors and other issues that could prevent Azure HPC Cache from adding an NFS storage system as a storage target. -This article includes details about how to check ports and how to enable root access to a NAS system. It also includes detailed information about less common issues that might cause NFS storage target creation to fail. +This article includes details about how to check ports and how to enable needed access to a NAS system. It also includes detailed information about less common issues that might cause NFS storage target creation to fail. 
> [!TIP] > Before using this guide, read [prerequisites for NFS storage targets](hpc-cache-prerequisites.md#nfs-storage-requirements). @@ -47,26 +47,41 @@ Make sure that all of the ports returned by the ``rpcinfo`` query allow unrestri Check these settings both on the NAS itself and also on any firewalls between the storage system and the cache subnet. -## Check root access +## Check root squash settings -Azure HPC Cache needs access to your storage system's exports to create the storage target. Specifically, it mounts the exports as user ID 0. +Root squash settings can disrupt file access if they are improperly configured. You should check that the settings on each storage export and on the matching HPC Cache client access policies are consistent. -Different storage systems use different methods to enable this access: +Root squash prevents requests sent by a local superuser root on the client from being sent to a back-end storage system as root. It reassigns requests from root to a non-privileged user ID (UID) like 'nobody'. -* Linux servers generally add ``no_root_squash`` to the exported path in ``/etc/exports``. -* NetApp and EMC systems typically control access with export rules that are tied to specific IP addresses or networks. +> [!TIP] +> +> Previous versions of Azure HPC Cache required NAS storage systems to allow root access from the HPC Cache. Now, you don't need to allow root access on a storage target export unless you want HPC Cache clients to have root access to the export. + +Root squash can be configured in an HPC Cache system in these places: + +* At the Azure HPC Cache - Use [client access policies](access-policies.md#root-squash) to configure root squash for clients that match specific filter rules. A client access policy is part of each NFS storage target namespace path. + + The default client access policy does not squash root. + +* At the storage export - You can configure your storage system to reassign incoming requests from root to a non-privileged user ID (UID). + +These two settings should match. That is, if a storage system export squashes root, you should change its HPC Cache client access rule to also squash root. If the settings don't match, you can have access problems when you try to read or write to the back-end storage system through the HPC Cache. -If using export rules, remember that the cache can use multiple different IP addresses from the cache subnet. Allow access from the full range of possible subnet IP addresses. +This table illustrates the behavior for different root squash scenarios when a client request is sent as UID 0 (root). The scenarios marked with * are ***not recommended*** because they can cause access problems. -> [!NOTE] -> Although the cache needs root access to the back-end storage system, you can restrict access for clients that connect through the cache. Read [Control client access](access-policies.md#root-squash) for details. +| Setting | UID sent from client | UID sent from HPC Cache | Effective UID on back-end storage | +|--|--|--|--| +| no root squash | 0 (root) | 0 (root) | 0 (root) | +| *root squash at HPC Cache only | 0 (root) | 65534 (nobody) | 65534 (nobody) | +| *root squash at NAS storage only | 0 (root) | 0 (root) | 65534 (nobody) | +| root squash at HPC Cache and NAS | 0 (root) | 65534 (nobody) | 65534 (nobody) | -Work with your NAS storage vendor to enable the right level of access for the cache. +(UID 65534 is an example; when you turn on root squash in a client access policy you can customize the UID.) 
-### Allow root access on directory paths - +## Check access on directory paths + -For NAS systems that export hierarchical directories, Azure HPC Cache needs root access to each export level. +For NAS systems that export hierarchical directories, check that Azure HPC Cache has appropriate access to each export level in the path to the files you are using. For example, a system might show three exports like these: @@ -76,7 +91,7 @@ For example, a system might show three exports like these: The export ``/ifs/accounting/payroll`` is a child of ``/ifs/accounting``, and ``/ifs/accounting`` is itself a child of ``/ifs``. -If you add the ``payroll`` export as an HPC Cache storage target, the cache actually mounts ``/ifs/`` and accesses the payroll directory from there. So Azure HPC Cache needs root access to ``/ifs`` in order to access the ``/ifs/accounting/payroll`` export. +If you add the ``payroll`` export as an HPC Cache storage target, the cache actually mounts ``/ifs/`` and accesses the payroll directory from there. So Azure HPC Cache needs sufficient access to ``/ifs`` in order to access the ``/ifs/accounting/payroll`` export. This requirement is related to the way the cache indexes files and avoids file collisions, using file handles that the storage system provides. @@ -84,7 +99,7 @@ A NAS system with hierarchical exports can give different file handles for the s The back-end storage system keeps internal aliases for file handles, but Azure HPC Cache cannot tell which file handles in its index reference the same item. So it is possible that the cache can have different writes cached for the same file, and apply the changes incorrectly because it does not know that they are the same file. -To avoid this possible file collision for files in multiple exports, Azure HPC Cache automatically mounts the shallowest available export in the path (``/ifs`` in the example) and uses the file handle given from that export. If multiple exports use the same base path, Azure HPC Cache needs root access to that path. +To avoid this possible file collision for files in multiple exports, Azure HPC Cache automatically mounts the shallowest available export in the path (``/ifs`` in the example) and uses the file handle given from that export. If multiple exports use the same base path, Azure HPC Cache needs access to that path. - See [Monitor Azure Load Testing](monitor-load-testing.md) for a description of monitoring Azure Load Testing. -- See [Monitor Azure resources with Azure Monitor](/azure/azure-monitor/essentials/monitor-azure-resource) for details on monitoring Azure resources. +- See [Monitor Azure resources with Azure Monitor](../azure-monitor/essentials/monitor-azure-resource.md) for details on monitoring Azure resources. \ No newline at end of file diff --git a/articles/load-testing/monitor-load-testing.md b/articles/load-testing/monitor-load-testing.md index 9bfe8fa12c68..2360c8e21712 100644 --- a/articles/load-testing/monitor-load-testing.md +++ b/articles/load-testing/monitor-load-testing.md @@ -37,7 +37,7 @@ The following sections build on this article by describing the specific data gat ## Monitoring data -Azure Load Testing collects the same kinds of monitoring data as other Azure resources that are described in [Monitoring data from Azure resources](/azure/azure-monitor/essentials/monitor-azure-resource#monitoring-data-from-Azure-resources). 
+Azure Load Testing collects the same kinds of monitoring data as other Azure resources that are described in [Monitoring data from Azure resources](../azure-monitor/essentials/monitor-azure-resource.md#monitoring-data-from-azure-resources). See [Monitor Azure Load Testing data reference](monitor-load-testing-reference.md) for detailed information on logs metrics created by Azure Load Testing. @@ -60,9 +60,9 @@ The following sections describe which types of logs you can collect. Data in Azure Monitor Logs is stored in tables where each table has its own set of unique properties. -All resource logs in Azure Monitor have the same fields followed by service-specific fields. The common schema is outlined in [Azure Monitor resource log schema](/azure/azure-monitor/essentials/resource-logs-schema). You can find the schema for Azure Load Testing resource logs in the [Monitor Azure Load Testing data reference](monitor-load-testing-reference.md#resource-logs). +All resource logs in Azure Monitor have the same fields followed by service-specific fields. The common schema is outlined in [Azure Monitor resource log schema](../azure-monitor/essentials/resource-logs-schema.md). You can find the schema for Azure Load Testing resource logs in the [Monitor Azure Load Testing data reference](monitor-load-testing-reference.md#resource-logs). -The [Activity log](/azure/azure-monitor/essentials/activity-log) is a type of platform log in Azure that provides insight into subscription-level events. You can view it independently or route it to Azure Monitor Logs, where you can do much more complex queries using Log Analytics. +The [Activity log](../azure-monitor/essentials/activity-log.md) is a type of platform log in Azure that provides insight into subscription-level events. You can view it independently or route it to Azure Monitor Logs, where you can do much more complex queries using Log Analytics. For a list of resource logs types collected for Azure Load Testing, see [Monitor Azure Load Testing data reference](monitor-load-testing-reference.md#resource-logs). @@ -71,7 +71,7 @@ For a list of resource logs types collected for Azure Load Testing, see [Monitor > [!IMPORTANT] -> When you select **Logs** from the Azure Load Testing menu, Log Analytics is opened with the query scope set to the current [service name]. This means that log queries will only include data from that resource. If you want to run a query that includes data from other [service resource] or data from other Azure services, select **Logs** from the **Azure Monitor** menu. See [Log query scope and time range in Azure Monitor Log Analytics](/azure/azure-monitor/logs/scope) for details. +> When you select **Logs** from the Azure Load Testing menu, Log Analytics is opened with the query scope set to the current Azure Load Testing resource. This means that log queries will only include data from that resource. If you want to run a query that includes data from other Azure Load Testing resources or data from other Azure services, select **Logs** from the **Azure Monitor** menu. See [Log query scope and time range in Azure Monitor Log Analytics](../azure-monitor/logs/scope.md) for details. Following are queries that you can use to help you monitor your Azure Load Testing resources: @@ -98,4 +98,4 @@ AzureLoadTestingOperation - See [Monitor Azure Load Testing data reference](monitor-load-testing-reference.md) for a reference of the metrics, logs, and other important values created by Azure Load Testing.
-- See [Monitoring Azure resources with Azure Monitor](/azure/azure-monitor/essentials/monitor-azure-resource) for details on monitoring Azure resources. +- See [Monitoring Azure resources with Azure Monitor](../azure-monitor/essentials/monitor-azure-resource.md) for details on monitoring Azure resources. diff --git a/articles/load-testing/quickstart-create-and-run-load-test.md b/articles/load-testing/quickstart-create-and-run-load-test.md index 4d8d07db7f99..99d67cf9fc79 100644 --- a/articles/load-testing/quickstart-create-and-run-load-test.md +++ b/articles/load-testing/quickstart-create-and-run-load-test.md @@ -25,7 +25,7 @@ Learn more about the [key concepts for Azure Load Testing](./concept-load-testin ## Prerequisites - An Azure account with an active subscription. [Create an account for free](https://azure.microsoft.com/free/?WT.mc_id=A261C142F). -- Azure RBAC role with permission to create and manage resources in the subscription, such as [Contributor](/azure/role-based-access-control/built-in-roles#contributor) or [Owner](/azure/role-based-access-control/built-in-roles#owner) +- Azure RBAC role with permission to create and manage resources in the subscription, such as [Contributor](../role-based-access-control/built-in-roles.md#contributor) or [Owner](../role-based-access-control/built-in-roles.md#owner) ## Create an Azure Load Testing resource @@ -112,4 +112,4 @@ You now have an Azure Load Testing resource, which you used to load test an exte You can reuse this resource to learn how to identify performance bottlenecks in an Azure-hosted application by using server-side metrics. > [!div class="nextstepaction"] -> [Identify performance bottlenecks](./tutorial-identify-bottlenecks-azure-portal.md) +> [Identify performance bottlenecks](./tutorial-identify-bottlenecks-azure-portal.md) \ No newline at end of file diff --git a/articles/logic-apps/logic-apps-add-run-inline-code.md b/articles/logic-apps/logic-apps-add-run-inline-code.md index 4b1c6a9e7c16..acabc092cb6e 100644 --- a/articles/logic-apps/logic-apps-add-run-inline-code.md +++ b/articles/logic-apps/logic-apps-add-run-inline-code.md @@ -1,120 +1,222 @@ --- -title: Add and run code snippets by using inline code -description: Learn how to create and run code snippets by using inline code actions for automated tasks and workflows that you create with Azure Logic Apps. +title: Run code snippets in workflows +description: Run code snippets in workflows using Inline Code operations in Azure Logic Apps. services: logic-apps ms.suite: integration ms.reviewer: deli, estfan, azla ms.topic: how-to -ms.date: 05/25/2021 +ms.date: 05/24/2022 ms.custom: devx-track-js --- -# Add and run code snippets by using inline code in Azure Logic Apps +# Run code snippets in workflows with Inline Code operations in Azure Logic Apps -When you want to run a piece of code inside your logic app workflow, you can add the built-in Inline Code action as a step in your logic app's workflow. This action works best when you want to run code that fits this scenario: +To create and run a code snippet in your logic app workflow without much setup, you can use the **Inline Code** built-in connector. This connector has an action that returns the result from the code snippet so that you can use that output in your workflow's subsequent actions. -* Runs in JavaScript. More languages are in development. +Currently, the connector only has a single action, which works best for a code snippet with the following attributes, but more actions are in development. 
The Inline Code connector also has
+[different limits](logic-apps-limits-and-config.md#inline-code-action-limits), based on whether your logic app workflow is [Consumption or Standard](logic-apps-overview.md#resource-environment-differences).
-* Finishes running in five seconds or fewer.
+| Action | Language | Language version | Run duration | Data size | Other notes |
+|--------|----------|------------------|--------------|-----------|-------------|
+| **Execute JavaScript Code** | JavaScript | **Standard**: <br>Node.js 12.x.x or 14.x.x <br><br>**Consumption**: <br>Node.js 8.11.1 <br><br>For more information, review [Standard built-in objects](https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects). | Finishes in 5 seconds or fewer. | Handles data up to 50 MB. | - Doesn't require working with the [**Variables** actions](logic-apps-create-variables-store-values.md), which are unsupported by the action. <br><br>- Doesn't support the `require()` function for running JavaScript. |
+|||||||
-* Handles data up to 50 MB in size.
+To run code that doesn't fit these attributes, you can [create and call a function through Azure Functions](logic-apps-azure-functions.md) instead.
-* Doesn't require working with the [**Variables** actions](../logic-apps/logic-apps-create-variables-store-values.md), which are not yet supported.
+This article shows how the action works in an example workflow that starts with an Office 365 Outlook trigger. The workflow runs when a new email arrives in the associated Outlook email account. The sample code snippet extracts any email addresses that exist in the email body and returns those addresses as output that you can use in a subsequent action.
-* Uses Node.js version 8.11.1 for [multi-tenant based logic apps](logic-apps-overview.md) or [Node.js versions 12.x.x or 14.x.x](https://nodejs.org/en/download/releases/) for [single-tenant based logic apps](single-tenant-overview-compare.md).
+The following diagram shows the highlights from the example workflow:
-  For more information, see [Standard built-in objects](https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects).
+### [Consumption](#tab/consumption)
-  > [!NOTE]
-  > The `require()` function isn't supported by the Inline Code action for running JavaScript.
+![Screenshot showing an example Consumption logic app workflow with the Inline Code action.](./media/logic-apps-add-run-inline-code/inline-code-overview-consumption.png)
-This action runs the code snippet and returns the output from that snippet as a token that's named `Result`. You can use this token with subsequent actions in your logic app's workflow. For other scenarios where you want to create a function for your code, try [creating and calling a function through Azure Functions instead](../logic-apps/logic-apps-azure-functions.md) in your logic app.
+### [Standard](#tab/standard)
-In this article, the example logic app triggers when a new email arrives in a work or school account. The code snippet extracts and returns any email addresses that appear in the email body.
+![Screenshot showing an example Standard logic app workflow with the Inline Code action.](./media/logic-apps-add-run-inline-code/inline-code-overview-standard.png)
-![Screenshot that shows an example logic app](./media/logic-apps-add-run-inline-code/inline-code-example-overview.png)
+---
 ## Prerequisites
-* An Azure account and subscription. If you don't have an Azure subscription, [sign up for a free Azure account](https://azure.microsoft.com/free/?WT.mc_id=A261C142F).
+* An Azure account and subscription. If you don't have a subscription, [sign up for a free Azure account](https://azure.microsoft.com/free/?WT.mc_id=A261C142F).
+
+* The logic app workflow where you want to add your code snippet. The workflow must already start with a trigger.
-* The logic app workflow where you want to add your code snippet, including a trigger. The example in this topic uses the Office 365 Outlook trigger that's named **When a new email arrives**.
+  This article's example uses the Office 365 Outlook trigger that's named **When a new email arrives**.
- If you don't have a logic app, review the following documentation: + If you don't have a workflow, review the following documentation: - * Multi-tenant: [Quickstart: Create your first logic app](../logic-apps/quickstart-create-first-logic-app-workflow.md) - * Single-tenant: [Create single-tenant based logic app workflows](create-single-tenant-workflows-azure-portal.md) + * Consumption: [Quickstart: Create your first logic app](quickstart-create-first-logic-app-workflow.md) -* Based on whether your logic app is multi-tenant or single-tenant, review the following information. + * Standard: [Create single-tenant based logic app workflows](create-single-tenant-workflows-azure-portal.md) - * Multi-tenant: Requires Node.js version 8.11.1. You also need an empty [integration account](../logic-apps/logic-apps-enterprise-integration-create-integration-account.md) that's linked to your logic app. Make sure that you use an integration account that's appropriate for your use case or scenario. +* Based on whether your logic app is Consumption or Standard, review the following requirements: - For example, [Free-tier](../logic-apps/logic-apps-pricing.md#integration-accounts) integration accounts are meant only for exploratory scenarios and workloads, not production scenarios, are limited in usage and throughput, and aren't supported by a service-level agreement (SLA). + * Consumption: Requires [Node.js version 8.11.10](https://nodejs.org/en/download/releases/) and a [link to an integration account](logic-apps-enterprise-integration-create-integration-account.md), empty or otherwise, from your logic app resource. - Other integration account tiers incur costs, but include SLA support, offer more throughput, and have higher limits. Learn more about integration account [tiers](../logic-apps/logic-apps-pricing.md#integration-accounts), [pricing](https://azure.microsoft.com/pricing/details/logic-apps/), and [limits](../logic-apps/logic-apps-limits-and-config.md#integration-account-limits). + > [!IMPORTANT] + > + > Make sure that you use an integration account that's appropriate for your use case or scenario. + > + > For example, [Free-tier](logic-apps-pricing.md#integration-accounts) integration accounts are meant only + > for exploratory scenarios and workloads, not production scenarios, are limited in usage and throughput, + > and aren't supported by a service-level agreement (SLA). + > + > Other integration account tiers incur costs, but include SLA support, offer more throughput, and have higher limits. + > Learn more about [integration account tiers](logic-apps-pricing.md#integration-accounts), + > [limits](logic-apps-limits-and-config.md#integration-account-limits), and + > [pricing](https://azure.microsoft.com/pricing/details/logic-apps/). - * Single-tenant: Requires [Node.js versions 10.x.x, 11.x.x, or 12.x.x](https://nodejs.org/en/download/releases/). However, you don't need an integration account, but the Inline Code action is renamed **Inline Code Operations** and has [updated limits](logic-apps-limits-and-config.md). + * Standard: Requires [Node.js versions 12.x.x or 14.x.x](https://nodejs.org/en/download/releases/), but no integration account. -## Add inline code +## Add the Inline Code action -1. If you haven't already, in the [Azure portal](https://portal.azure.com), open your logic app workflow in the designer. +### [Consumption](#tab/consumption) -1. In your workflow, choose where to add the Inline Code action, either as a new step at the end of your workflow or between steps. +1. 
In the [Azure portal](https://portal.azure.com), open your logic app workflow in the designer. - To add the action between steps, move your mouse pointer over the arrow that connects those steps. Select the plus sign (**+**) that appears, and select **Add an action**. +1. On the designer, add the Inline Code action to your workflow. You can add an action either as a new step at the end of your workflow or between steps. This example adds the action under the Office 365 Outlook trigger. - This example adds the action under the Office 365 Outlook trigger. + * To add the action at the end of your workflow, select **New step**. - ![Add the new step under the trigger](./media/logic-apps-add-run-inline-code/add-new-step.png) + * To add the action between steps, move your mouse pointer over the arrow that connects those steps. Select the plus sign (**+**) that appears, and select **Add an action**. -1. In the action search box, enter `inline code`. From the actions list, select the action named **Execute JavaScript Code**. +1. In the **Choose an operation** search box, enter **inline code**. From the actions list, select the action named **Execute JavaScript Code**. - ![Select the "Execute JavaScript Code" action](./media/logic-apps-add-run-inline-code/select-inline-code-action.png) + ![Screenshot showing Consumption workflow designer and "Execute JavaScript Code" action selected.](./media/logic-apps-add-run-inline-code/select-inline-code-action-consumption.png) The action appears in the designer and by default, contains some sample code, including a `return` statement. - ![Inline Code action with default sample code](./media/logic-apps-add-run-inline-code/inline-code-action-default.png) + ![Screenshot showing the Inline Code action with default sample code.](./media/logic-apps-add-run-inline-code/inline-code-action-default-consumption.png) 1. In the **Code** box, delete the sample code, and enter your code. Write the code that you'd put inside a method, but without the method signature. + > [!TIP] + > + > When your cursor is in the **Code** box, the dynamic content list appears. Although you'll + > use this list later, you can ignore and leave the list open for now. Don't select **Hide**. + If you start typing a recognized keyword, the autocomplete list appears so that you can select from available keywords, for example: - ![Keyword autocomplete list](./media/logic-apps-add-run-inline-code/auto-complete.png) + ![Screenshot showing the Consumption workflow, Inline Code action, and keyword autocomplete list.](./media/logic-apps-add-run-inline-code/auto-complete-consumption.png) - This example code snippet first creates a variable that stores a *regular expression*, which specifies a pattern to match in input text. The code then creates a variable that stores the email body data from the trigger. + The following example code snippet first creates a variable named **myResult** that stores a *regular expression*, which specifies a pattern to match in input text. The code then creates a variable named **email** that stores the email message's body content from the trigger outputs. 
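    Put together, the finished snippet that these steps build up looks similar to the following sketch. The regular expression is only an illustrative email-matching pattern (the article doesn't prescribe an exact one), and the `Body` property name reflects the rename that a later step describes.

    ```javascript
    // Illustrative pattern for matching email addresses; adjust as needed.
    var myResult = /[a-zA-Z0-9._%+-]+@[a-zA-Z0-9.-]+\.[a-zA-Z]{2,}/g;

    // Resolved from the Body token selected in the dynamic content list; a later step
    // renames the second "body" property to "Body".
    var email = workflowContext.trigger.outputs.body.Body;

    // Returning the matches makes them available to later actions through the Result token.
    return email.match(myResult);
    ```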
- ![Create variables](./media/logic-apps-add-run-inline-code/save-email-body-variable.png) + ![Screenshot showing the Consumption workflow, Inline Code action, and example code that creates variables.](./media/logic-apps-add-run-inline-code/save-email-body-variable-consumption.png) - To make the results from the trigger and previous actions easier to reference, the dynamic content list appears when your cursor is inside the **Code** box. For this example, the list shows available results from the trigger, including the **Body** token, which you can now select. +1. With your cursor still in the **Code** box, from the open dynamic content list, find the **When a new email arrives** section, and select the **Body** property, which references the email message's body. - After you select the **Body** token, the inline code action resolves the token to a `workflowContext` object that references the email's `Body` property value: + ![Screenshot showing the Consumption workflow, Inline Code action, dynamic content list, and email message's "Body" property selected.](./media/logic-apps-add-run-inline-code/select-output-consumption.png) - ![Select result](./media/logic-apps-add-run-inline-code/inline-code-example-select-outputs.png) + The dynamic content list shows the outputs from the trigger and any preceding actions when those outputs match the input format for the edit box that's currently in focus. This list makes these outputs easier to use and reference from your workflow. For this example, the list shows the outputs from the Outlook trigger, including the email message's **Body** property. - In the **Code** box, your snippet can use the read-only `workflowContext` object as input. This object includes properties that give your code access to the results from the trigger and previous actions in your workflow. For more information, see [Reference trigger and action results in your code](#workflowcontext) later in this topic. + After you select the **Body** property, the Inline Code action resolves the token to a read-only `workflowContext` JSON object, which your snippet can use as input. The `workflowContext` object includes properties that give your code access to the outputs from the trigger and preceding actions in your workflow, such as the trigger's `body` property, which differs from the email message's **Body** property. For more information about the `workflowContext` object, see [Reference trigger and action outputs using the workflowContext object](#workflowcontext) later in this article. - > [!NOTE] - > If your code snippet references action names that use the dot (.) operator, you must add those - > action names to the [**Actions** parameter](#add-parameters). Those references must also enclose - > the action names with square brackets ([]) and quotation marks, for example: + > [!IMPORTANT] + > + > If your code snippet references action names that include the dot (**.**) operator, + > those references have to enclose these action names with square brackets (**[]**) + > and quotation marks (**""**), for example: > - > `// Correct`
    - > `workflowContext.actions["my.action.name"].body`
    + > `// Correct`
    + > `workflowContext.actions["my.action.name"].body` > > `// Incorrect`
    > `workflowContext.actions.my.action.name.body` + > + > Also, in the Inline Code action, you have to add the [**Actions** parameter](#add-parameters) + > and then add these action names to that parameter. For more information, see + > [Add dependencies as parameters to an Inline Code action](#add-parameters) later in this article. + +1. To differentiate the email message's **Body** property that you selected from the trigger's `body` property, rename the second `body` property to `Body` instead. Add the closing semicolon (**;**) at the end to finish the code statement. + + ![Screenshot showing the Consumption logic app workflow, Inline Code action, and renamed "Body" property with closing semicolon.](./media/logic-apps-add-run-inline-code/rename-body-property-consumption.png) + + The Inline Code action doesn't syntactically require a `return` statement. However, by including the `return` statement, you can more easily reference the action results later in your workflow by using the **Result** token in later actions. + + In this example, the code snippet returns the result by calling the `match()` function, which finds any matches in the email message body to the specified regular expression. The **Create HTML table** action then uses the **Result** token to reference the results from the Inline Code action and creates a single result. + + ![Screenshot showing the finished Consumption logic app workflow.](./media/logic-apps-add-run-inline-code/inline-code-complete-example-consumption.png) + +1. When you're done, save your workflow. + +### [Standard](#tab/standard) + +1. In the [Azure portal](https://portal.azure.com), open your logic app workflow in the designer. + +1. On the designer, add the Inline Code action to your workflow. You can add an action either as a new step at the end of your workflow or between steps. This example adds the action under the Office 365 Outlook trigger. + + * To add the action at the end of your workflow, select the plus sign (**+**), and then select **Add an action**. + + * To add the action between steps, move your mouse pointer over the arrow that connects those steps. Select the plus sign (**+**) that appears, and select **Add an action**. + +1. In the **Choose an operation** search box, enter **inline code**. From the actions list, select the action named **Execute JavaScript Code**. - The Inline Code action doesn't require a `return` statement, but the results from a `return` statement are available for reference in later actions through the **Result** token. For example, the code snippet returns the result by calling the `match()` function, which finds matches in the email body against the regular expression. The **Compose** action uses the **Result** token to reference the results from the inline code action and creates a single result. + ![Screenshot showing Standard workflow designer and "Execute JavaScript Code" action selected.](./media/logic-apps-add-run-inline-code/select-inline-code-action-standard.png) - ![Finished logic app](./media/logic-apps-add-run-inline-code/inline-code-complete-example.png) +1. In the **code** box, enter your code. Write the code that you'd put inside a method, but without the method signature. + + > [!TIP] + > + > When your cursor is in the **code** box, the dynamic content list appears. Although you'll + > use this list later, you can ignore and leave the list open for now. Don't select **Hide**. 
+ + If you start typing a recognized keyword, the autocomplete list appears so that you can select from available keywords, for example: + + ![Screenshot showing the Standard workflow, Inline Code action, and keyword autocomplete list.](./media/logic-apps-add-run-inline-code/auto-complete-standard.png) + + The following example code snippet first creates a variable named **myResult** that stores a *regular expression*, which specifies a pattern to match in input text. The code then creates a variable named **email** that stores the email message's body content from the trigger outputs. + + ![Screenshot showing the Standard workflow, Inline Code action, and example code that creates variables.](./media/logic-apps-add-run-inline-code/save-email-body-variable-standard.png) + +1. With your cursor still in the **code** box, from the open dynamic content list, find the **When a new email arrives** section, and select the **Body** token, which references the email's message body. + + ![Screenshot showing the Standard workflow, Inline Code action, dynamic content list, and email message's "Body" property selected.](./media/logic-apps-add-run-inline-code/select-output-standard.png) + + The dynamic content list shows the outputs from the trigger and any preceding actions where those outputs match the input format for the edit box that's currently in focus. This list makes these outputs easier to use and reference from your workflow. For this example, the list shows the outputs from the Outlook trigger, including the email message's **Body** property. + + After you select the **Body** property, the Inline Code action resolves the token to a read-only `workflowContext` JSON object, which your snippet can use as input. The `workflowContext` object includes properties that give your code access to the outputs from the trigger and preceding actions in your workflow, such as the trigger's `body` property, which differs from the email message's **Body** property. For more information about the `workflowContext` object, see [Reference trigger and action outputs using the workflowContext object](#workflowcontext) later in this article. + + > [!IMPORTANT] + > + > If your code snippet references action names that include the dot (**.**) operator, + > those references have to enclose these action names with square brackets (**[]**) + > and quotation marks (**""**), for example: + > + > `// Correct`
    + > `workflowContext.actions["my.action.name"].body` + > + > `// Incorrect`
    + > `workflowContext.actions.my.action.name.body` + > + > Also, in the Inline Code action, you have to add the **Actions** parameter + > and then add these action names to that parameter. For more information, see + > [Add dependencies as parameters to an Inline Code action](#add-parameters) later in this article. -1. When you're done, save your logic app. +1. To differentiate the email message's **Body** property that you selected from the trigger's `body` property, rename the second `body` property to `Body` instead. Add the closing semicolon (**;**) at the end to finish the code statement. + + ![Screenshot showing the Standard logic app workflow, Inline Code action, and renamed "Body" property with closing semicolon.](./media/logic-apps-add-run-inline-code/rename-body-property-standard.png) + + The Inline Code action doesn't syntactically require a `return` statement. However, by including the `return` statement, you can reference the action results later in your workflow by using the **Outputs** token in later actions. + + In this example, the code snippet returns the result by calling the `match()` function, which finds any matches in the email message body to the specified regular expression. + + ![Screenshot showing the Standard logic app workflow and Inline Code action with "return" statement.](./media/logic-apps-add-run-inline-code/return-statement-standard.png) + + The **Create HTML table** action then uses the **Outputs** token to reference the results from the Inline Code action and creates a single result. + + ![Screenshot showing the finished Standard logic app workflow.](./media/logic-apps-add-run-inline-code/inline-code-complete-example-standard.png) + +1. When you're done, save your workflow. + +--- -### Reference trigger and action results in your code +### Reference trigger and action outputs using the workflowContext object -The `workflowContext` object has this structure, which includes the `actions`, `trigger`, and `workflow` subproperties: +From inside your code snippet on the designer, you can use the dynamic content list to select a token that references the output from the trigger or any preceding action. When you select the token, the Inline Code action resolves that token to a read-only `workflowContext` JSON object. This object gives your code access to the outputs from the trigger, any preceding actions, and the workflow. The object uses the following structure and includes the `actions`, `trigger`, and `workflow` properties, which are also objects: ```json { @@ -133,16 +235,16 @@ The `workflowContext` object has this structure, which includes the `actions`, ` } ``` -This table contains more information about these subproperties: +The following table has more information about these properties: | Property | Type | Description | -|----------|------|-------| -| `actions` | Object collection | Result objects from actions that run before your code snippet runs. Each object has a *key-value* pair where the key is the name of an action, and the value is equivalent to calling the [actions() function](../logic-apps/workflow-definition-language-functions-reference.md#actions) with `@actions('')`. The action's name uses the same action name that's used in the underlying workflow definition, which replaces spaces (" ") in the action name with underscores (_). This object provides access to action property values from the current workflow instance run. 
| -| `trigger` | Object | Result object from the trigger and equivalent to calling the [trigger() function](../logic-apps/workflow-definition-language-functions-reference.md#trigger). This object provides access to trigger property values from the current workflow instance run. | -| `workflow` | Object | The workflow object and equivalent to calling the [workflow() function](../logic-apps/workflow-definition-language-functions-reference.md#workflow). This object provides access to workflow property values, such as the workflow name, run ID, and so on, from the current workflow instance run. | -||| +|----------|------|-------------| +| `actions` | Object collection | The result objects from any preceding actions that run before your code snippet runs. Each object has a *key-value* pair where the key is the action name, and the value is equivalent to the result from calling the [actions() function](workflow-definition-language-functions-reference.md#actions) with the `@actions('')` expression.

    The action's name uses the same action name that appears in the underlying workflow definition, which replaces spaces (**" "**) in the action name with underscores (**\_**). This object collection provides access to the action's property values from the current workflow instance run. | +| `trigger` | Object | The result object from the trigger where the result is the equivalent to calling the [trigger() function](workflow-definition-language-functions-reference.md#trigger). This object provides access to trigger's property values from the current workflow instance run. | +| `workflow` | Object | The workflow object that is the equivalent to calling the [workflow() function](workflow-definition-language-functions-reference.md#workflow). This object provides access to the property values, such as the workflow name, run ID, and so on, from the current workflow instance run. | +|||| -In this topic's example, the `workflowContext` object has these properties that your code can access: +In this article's example, the `workflowContext` JSON object might have the following sample properties and values from the Outlook trigger: ```json { @@ -212,65 +314,98 @@ In this topic's example, the `workflowContext` object has these properties that -## Add parameters +## Add dependencies as parameters to an Inline Code action -In some cases, you might have to explicitly require that the Inline Code action includes results from the trigger or specific actions that your code references as dependencies by adding the **Trigger** or **Actions** parameters. This option is useful for scenarios where the referenced results aren't found at run time. +In some scenarios, you might have to explicitly require that the Inline Code action includes outputs from the trigger or actions that your code references as dependencies. For example, you have to take this extra step when your code references outputs that aren't available at workflow run time. During workflow creation time, the Azure Logic Apps engine analyzes the code snippet to determine whether the code references any trigger or action outputs. If those references exist, the engine includes those outputs automatically. At workflow run time, if the referenced trigger or action output isn't found in the `workflowContext` object, the engine generates an error. To resolve this error, you have to add that trigger or action as an explicit dependency for the Inline Code action. Another scenario that requires you to take this step is when the `workflowContext` object references a trigger or action name that uses the dot operator (**.**). -> [!TIP] -> If you plan to reuse your code, add references to properties by using the **Code** box so that your code -> includes the resolved token references, rather than adding the trigger or actions as explicit dependencies. +To add a trigger or action as a dependency, you add the **Trigger** or **Actions** parameters as applicable to the Inline Code action. You then add the trigger or action names as they appear in your workflow's underlying JSON definition. -For example, suppose you have code that references the **SelectedOption** result from the **Send approval email** action for the Office 365 Outlook connector. At create time, the Logic Apps engine analyzes your code to determine whether you've referenced any trigger or action results and includes those results automatically. 
At run time, should you get an error that the referenced trigger or action result isn't available in the specified `workflowContext` object, you can add that trigger or action as an explicit dependency. In this example, you add the **Actions** parameter and specify that the Inline Code action explicitly include the result from the **Send approval email** action. +> [!NOTE] +> +> You can't add **Variables** operations, loops such as **For each** or **Until**, and iteration +> indexes as explicit dependencies. +> +> If you plan to reuse your code, make sure to always use the code snippet edit box to reference +> trigger and action outputs. That way, your code includes the resolved token references, rather than +> just add the trigger or action outputs as explicit dependencies. -To add these parameters, open the **Add new parameter** list, and select the parameters you want: +For example, suppose the Office 365 Outlook connector's **Send approval email** action precedes the code snippet in the sample workflow. The following example code snippet includes a reference to the **SelectedOption** output from this action. - ![Add parameters](./media/logic-apps-add-run-inline-code/inline-code-action-add-parameters.png) +### [Consumption](#tab/consumption) - | Parameter | Description | - |-----------|-------------| - | **Actions** | Include results from previous actions. See [Include action results](#action-results). | - | **Trigger** | Include results from the trigger. See [Include trigger results](#trigger-results). | - ||| - - - -### Include trigger results +![Screenshot that shows the Consumption workflow and Inline Code action with updated example code snippet.](./media/logic-apps-add-run-inline-code/add-actions-parameter-code-snippet-consumption.png) -If you select **Triggers**, you're prompted whether to include trigger results. +### [Standard](#tab/standard) -* From the **Trigger** list, select **Yes**. +![Screenshot that shows the Standard workflow and Inline Code action with updated example code snippet.](./media/logic-apps-add-run-inline-code/add-actions-parameter-code-snippet-standard.png) - +--- -### Include action results +For this example, you have to add only the **Actions** parameter, and then add the action's JSON name, `Send_approval_email`, to the parameter. That way, you specify that the Inline Code action explicitly includes the output from the **Send approval email** action. -If you select **Actions**, you're prompted for the actions that you want to add. However, before you start adding actions, you need the version of the action name that appears in the logic app's underlying workflow definition. +### Find the trigger or action's JSON name -* This capability doesn't support variables, loops, and iteration indexes. +Before you start, you need the JSON name for the trigger or action in the underlying workflow definition. -* Names in your logic app's workflow definition use an underscore (_), not a space. +* Names in your workflow definition use an underscore (_), not a space. -* For action names that use the dot operator (.), include those operators, for example: +* If an action name uses the dot operator (.), include that operator, for example: `My.Action.Name` -1. On the designer toolbar, select **Code view**, and search inside the `actions` attribute for the action name. +### [Consumption](#tab/consumption) + +1. On the workflow designer toolbar, select **Code view**. In the `actions` object, find the action's name. 
- For example, `Send_approval_email_` is the JSON name for the **Send approval email** action. + For example, `Send_approval_email` is the JSON name for the **Send approval email** action. - ![Find action name in JSON](./media/logic-apps-add-run-inline-code/find-action-name-json.png) + ![Screenshot showing the action name in JSON.](./media/logic-apps-add-run-inline-code/find-action-name-json.png) 1. To return to designer view, on the code view toolbar, select **Designer**. -1. To add the first action, in the **Actions Item - 1** box, enter the action's JSON name. +1. Now add the JSON name to the Inline Code action. + +### [Standard](#tab/standard) + +1. On the workflow menu, select **Code**. In the `actions` object, find the action's name. + + For example, `Send_approval_email` is the JSON name for the **Send approval email** action. + + ![Screenshot showing the action name in JSON.](./media/logic-apps-add-run-inline-code/find-action-name-json.png) + +1. To return to designer view, on the workflow menu, select **Designer**. + +1. Now add the JSON name to the Inline Code action. + +--- + +### Add the trigger or action name to the Inline Code action + +1. In the Inline Code action, open the **Add new parameter** list. + +1. From the parameters list, select the following parameters as your scenario requires. + + | Parameter | Description | + |-----------|-------------| + | **Actions** | Include outputs from preceding actions as dependencies. When you select this parameter, you're prompted for the actions that you want to add. | + | **Trigger** | Include outputs from the trigger as dependencies. When you select this parameter, you're prompted whether to include trigger results. So, from the **Trigger** list, select **Yes**. | + ||| + +1. For this example, select the **Actions** parameter. + + ![Screenshot showing the Inline Code action and "Actions" parameter selected.](./media/logic-apps-add-run-inline-code/add-actions-parameter.png) + +1. In the **Actions Item - 1** box, enter the action's JSON name. + + ![Screenshot showing the "Actions Item -1" box and the action's JSON name.](./media/logic-apps-add-run-inline-code/add-action-json-name.png) - ![Enter first action](./media/logic-apps-add-run-inline-code/add-action-parameter.png) +1. To add another action name, select **Add new item**. -1. To add another action, select **Add new item**. +1. When you're done, save your workflow. -## Reference +## Action reference -For more information about the **Execute JavaScript Code** action's structure and syntax in your logic app's underlying workflow definition using the Workflow Definition Language, see this action's [reference section](../logic-apps/logic-apps-workflow-actions-triggers.md#run-javascript-code). +For more information about the **Execute JavaScript Code** action's structure and syntax in your underlying workflow definition using the Workflow Definition Language, see this action's [reference section](logic-apps-workflow-actions-triggers.md#run-javascript-code). 
## Next steps diff --git a/articles/logic-apps/logic-apps-diagnosing-failures.md b/articles/logic-apps/logic-apps-diagnosing-failures.md index 2ddbcf2ba9e9..e44b28dc2cf5 100644 --- a/articles/logic-apps/logic-apps-diagnosing-failures.md +++ b/articles/logic-apps/logic-apps-diagnosing-failures.md @@ -5,78 +5,199 @@ services: logic-apps ms.suite: integration ms.reviewer: estfan, azla ms.topic: how-to -ms.date: 01/31/2020 +ms.date: 05/24/2022 --- # Troubleshoot and diagnose workflow failures in Azure Logic Apps -Your logic app generates information that can help you diagnose and debug problems in your app. You can diagnose a logic app by reviewing each step in the workflow through the Azure portal. Or, you can add some steps to a workflow for runtime debugging. +Your logic app workflow generates information that can help you diagnose and debug problems in your app. You can diagnose your workflow by reviewing the inputs, outputs, and other information for each step in the workflow using the Azure portal. Or, you can add some steps to a workflow for runtime debugging. ## Check trigger history -Each logic app run starts with a trigger attempt, so if the trigger doesn't fire, follow these steps: +Each workflow run starts with a trigger, which either fires on a schedule or waits for an incoming request or event. The trigger history lists all the trigger attempts that your workflow made and information about the inputs and outputs for each trigger attempt. If the trigger doesn't fire, try the following steps. -1. Check the trigger's status by [checking the trigger history](../logic-apps/monitor-logic-apps.md#review-trigger-history). To view more information about the trigger attempt, select that trigger event, for example: +### [Consumption](#tab/consumption) - ![View trigger status and history](./media/logic-apps-diagnosing-failures/logic-app-trigger-history.png) +1. To check the trigger's status in your Consumption logic app, [review the trigger history](monitor-logic-apps.md#review-trigger-history). To view more information about the trigger attempt, select that trigger event, for example: -1. Check the trigger's inputs to confirm that they appear as you expect. Under **Inputs link**, select the link, which shows the **Inputs** pane. + ![Screenshot showing Azure portal with Consumption logic app workflow trigger history.](./media/logic-apps-diagnosing-failures/logic-app-trigger-history-consumption.png) + +1. Check the trigger's inputs to confirm that they appear as you expect. On the **History** pane, under **Inputs link**, select the link, which shows the **Inputs** pane. + + Trigger inputs include the data that the trigger expects and requires to start the workflow. Reviewing these inputs can help you determine whether the trigger inputs are correct and whether the condition was met so that the workflow can continue. + + ![Screenshot showing Consumption logic app workflow trigger inputs.](./media/logic-apps-diagnosing-failures/review-trigger-inputs-consumption.png) + +1. Check the triggers outputs, if any, to confirm that they appear as you expect. On the **History** pane, under **Outputs link**, select the link, which shows the **Outputs** pane. + + Trigger outputs include the data that the trigger passes to the next step in your workflow. Reviewing these outputs can help you determine whether the correct or expected values passed on to the next step in your workflow. 
+ + For example, an error message states that the RSS feed wasn't found: + + ![Screenshot showing Consumption logic app workflow trigger outputs.](./media/logic-apps-diagnosing-failures/review-trigger-outputs-consumption.png) + + > [!TIP] + > + > If you find any content that you don't recognize, learn more about + > [different content types](../logic-apps/logic-apps-content-type.md) in Azure Logic Apps. + +### [Standard](#tab/standard) + +1. To check the trigger's status in your Standard logic app, [review the trigger history](monitor-logic-apps.md#review-trigger-history). To view more information about the trigger attempt, select that trigger event, for example: + + ![Screenshot showing Azure portal with Standard logic app workflow trigger history.](./media/logic-apps-diagnosing-failures/logic-app-trigger-history-standard.png) + +1. Check the trigger's inputs to confirm that they appear as you expect. On the **History** pane, under **Inputs link**, select the link, which shows the **Inputs** pane. Trigger inputs include the data that the trigger expects and requires to start the workflow. Reviewing these inputs can help you determine whether the trigger inputs are correct and whether the condition was met so that the workflow can continue. - For example, the `feedUrl` property here has an incorrect RSS feed value: + ![Screenshot showing Standard logic app workflow trigger inputs.](./media/logic-apps-diagnosing-failures/review-trigger-inputs-standard.png) - ![Review trigger inputs for errors](./media/logic-apps-diagnosing-failures/review-trigger-inputs-for-errors.png) +1. Check the triggers outputs, if any, to confirm that they appear as you expect. On the **History** pane, under **Outputs link**, select the link, which shows the **Outputs** pane. -1. Check the triggers outputs, if any, to confirm that they appear as you expect. Under **Outputs link**, select the link, which shows the **Outputs** pane. + Trigger outputs include the data that the trigger passes to the next step in your workflow. Reviewing these outputs can help you determine whether the correct or expected values passed on to the next step in your workflow. - Trigger outputs include the data that the trigger passes to the next step in your workflow. Reviewing these outputs can help you determine whether the correct or expected values passed on to the next step in your workflow, for example: + For example, an error message states that the RSS feed wasn't found: - ![Review trigger outputs for errors](./media/logic-apps-diagnosing-failures/review-trigger-outputs-for-errors.png) + ![Screenshot showing Standard logic app workflow trigger outputs.](./media/logic-apps-diagnosing-failures/review-trigger-outputs-standard.png) > [!TIP] + > > If you find any content that you don't recognize, learn more about > [different content types](../logic-apps/logic-apps-content-type.md) in Azure Logic Apps. +--- + -## Check runs history +## Check workflow run history + +Each time that the trigger fires, Azure Logic Apps creates a workflow instance and runs that instance. If a run fails, try the following steps so you can review what happened during that run. You can review the status, inputs, and outputs for each step in the workflow. + +### [Consumption](#tab/consumption) + +1. To check the workflow's run status in your Consumption logic app, [review the runs history](monitor-logic-apps.md#review-runs-history). To view more information about a failed run, including all the steps in that run in their status, select the failed run. 
+ + ![Screenshot showing Azure portal with Consumption logic app workflow runs and a failed run selected.](./media/logic-apps-diagnosing-failures/logic-app-runs-history-consumption.png) + +1. After all the steps in the run appear, select each step to expand their shapes. + + ![Screenshot showing Consumption logic app workflow with failed step selected.](./media/logic-apps-diagnosing-failures/logic-app-run-pane-consumption.png) -Each time that the trigger fires for an item or event, the Logic Apps engine creates and runs a separate workflow instance for each item or event. If a run fails, follow these steps to review what happened during that run, including the status for each step in the workflow plus the inputs and outputs for each step. +1. Review the inputs, outputs, and any error messages for the failed step. -1. Check the workflow's run status by [checking the runs history](../logic-apps/monitor-logic-apps.md#review-runs-history). To view more information about a failed run, including all the steps in that run in their status, select the failed run. + ![Screenshot showing Consumption logic app workflow with failed step details.](./media/logic-apps-diagnosing-failures/failed-action-inputs-consumption.png) - ![View run history and select failed run](./media/logic-apps-diagnosing-failures/logic-app-runs-history.png) + For example, the following screenshot shows the outputs from the failed RSS action. -1. After all the steps in the run appear, expand the first failed step. + ![Screenshot showing Consumption logic app workflow with failed step outputs.](./media/logic-apps-diagnosing-failures/failed-action-outputs-consumption.png) - ![Expand first failed step](./media/logic-apps-diagnosing-failures/logic-app-run-pane.png) +### [Standard](#tab/standard) -1. Check the failed step's inputs to confirm whether they appear as you expect. +1. To check the workflow's run status in your Standard logic app, [review the runs history](monitor-logic-apps.md#review-runs-history). To view more information about a failed run, including all the steps in that run in their status, select the failed run. -1. Review the details for each step in a specific run. Under **Runs history**, select the run that you want to examine. + ![Screenshot showing Azure portal with Standard logic app workflow runs and a failed run selected.](./media/logic-apps-diagnosing-failures/logic-app-runs-history-standard.png) - ![Review runs history](./media/logic-apps-diagnosing-failures/logic-app-runs-history.png) +1. After all the steps in the run appear, select each step to review their details. - ![View details for a logic app run](./media/logic-apps-diagnosing-failures/logic-app-run-details.png) + ![Screenshot showing Standard logic app workflow with failed step selected.](./media/logic-apps-diagnosing-failures/logic-app-run-pane-standard.png) -1. To examine the inputs, outputs, and any error messages for a specific step, choose that step so that the shape expands and shows the details. For example: +1. Review the inputs, outputs, and any error messages for the failed step. - ![View step details](./media/logic-apps-diagnosing-failures/logic-app-run-details-expanded.png) + ![Screenshot showing Standard logic app workflow with failed step inputs.](./media/logic-apps-diagnosing-failures/failed-action-inputs-standard.png) + + For example, the following screenshot shows the outputs from the failed RSS action. 
+ + ![Screenshot showing Standard logic app workflow with failed step outputs.](./media/logic-apps-diagnosing-failures/failed-action-outputs-standard.png) + +--- ## Perform runtime debugging -To help with debugging, you can add diagnostic steps to a logic app workflow, along with reviewing the trigger and runs history. For example, you can add steps that use the [Webhook Tester](https://webhook.site/) service so that you can inspect HTTP requests and determine their exact size, shape, and format. +To help with debugging, you can add diagnostic steps to a logic app workflow, along with reviewing the trigger and runs history. For example, you can add steps that use the [Webhook Tester](https://webhook.site/) service, so you can inspect HTTP requests and determine their exact size, shape, and format. -1. Go to the [Webhook Tester](https://webhook.site/) site and copy the generated unique URL. +1. In a browser, go to the [Webhook Tester](https://webhook.site/) site, and copy the generated unique URL. -1. In your logic app, add an HTTP POST action plus the body content that you want to test, for example, an expression or another step output. +1. In your logic app, add an HTTP POST action with the body content that you want to test, for example, an expression or another step output. 1. Paste your URL from Webhook Tester into the HTTP POST action. -1. To review how a request is formed when generated from the Logic Apps engine, run the logic app, and revisit the Webhook Tester site for more details. +1. To review how Azure Logic Apps generates and forms a request, run the logic app workflow. You can then revisit the Webhook Tester site for more information. + +## Common problems - Standard logic apps + +### Inaccessible artifacts in Azure storage account + +Standard logic apps store all artifacts in an Azure storage account. You might get the following errors if these artifacts aren't accessible. For example, the storage account itself might not be accessible, or the storage account is behind a firewall but no private endpoint is set up for the storage services to use. + +| Azure portal location | Error | +|-----------------------|-------| +| Overview pane | - **System.private.corelib:Access to the path 'C:\\home\\site\\wwwroot\\hostj.son is denied**

    - **Azure.Storage.Blobs: This request is not authorized to perform this operation** | +| Workflows pane | - **Cannot reach host runtime. Error details, Code: 'BadRequest', Message: 'Encountered an error (InternalServerError) from host runtime.'**

    - **Cannot reach host runtime. Error details, Code: 'BadRequest', Message: 'Encountered an error (ServiceUnavailable) from host runtime.'**

    - **Cannot reach host runtime. Error details, Code: 'BadRequest', Message: 'Encountered an error (BadGateway) from host runtime.'** | +| During workflow creation and execution | - **Failed to save workflow**

    - **Error in the designer: GetCallFailed. Failed fetching operations**

    - **ajaxExtended call failed** | +||| + +### Troubleshooting options + +The following list includes possible causes for these errors and steps to help troubleshoot. + +* For a public storage account, check access to the storage account in the following ways: + + * Check the storage account's connectivity using [Azure Storage Explorer](../vs-azure-tools-storage-manage-with-storage-explorer.md). + + * In your logic app resource's app settings, confirm the storage account's connection string in the app settings, **AzureWebJobsStorage** and **WEBSITE_CONTENTAZUREFILECONNECTIONSTRING**. For more information, review [Host and app settings for logic apps in single-tenant Azure Logic Apps](edit-app-settings-host-settings.md#manage-app-settings). + + If connectivity fails, check whether the Shared Access Signature (SAS) key in the connection string is the most recent. + +* For a storage account that's behind a firewall, check access to the storage account in the following ways: + + * If firewall restrictions are enabled on the storage account, check whether [private endpoints](../private-link/private-endpoint-overview.md) are set up for Blob, File, Table, and Queue storage services. + + * Check the storage account's connectivity using [Azure Storage Explorer](../vs-azure-tools-storage-manage-with-storage-explorer.md). + + If you find connectivity problems, continue with the following steps: + + 1. In the same virtual network that's integrated with your logic app, create an Azure virtual machine, which you can put in a different subnet. + + 1. From a command prompt, run **nslookup** to check that the Blob, File, Table, and Queue storage services resolve to the expected IP addresses. + + Syntax: `nslookup [StorageaccountHostName] [OptionalDNSServer]` + + Blob: `nslookup {StorageaccountName}.blob.core.windows.net` + + File: `nslookup {StorageaccountName}.file.core.windows.net` + + Table: `nslookup {StorageaccountName}.table.core.windows.net` + + Queue: `nslookup {StorageaccountName}.queue.core.windows.net` + + * If the storage service has a [Service Endpoint](../virtual-network/virtual-network-service-endpoints-overview.md), the service resolves to a public IP address. + + * If the storage service has a [private endpoint](../private-link/private-endpoint-overview.md), the service resolves to the respective network interface controller (NIC) private IP addresses. + + 1. If the previous domain name server (DNS) queries resolve successfully, run the **psping** or **tcpping** commands to check connectivity to the storage account over port 443: + + Syntax: `psping [StorageaccountHostName] [Port] [OptionalDNSServer]` + + Blob: `psping {StorageaccountName}.blob.core.windows.net:443` + + File: `psping {StorageaccountName}.file.core.windows.net:443` + + Table: `psping {StorageaccountName}.table.core.windows.net:443` + + Queue: `psping {StorageaccountName}.queue.core.windows.net:443` + + 1. If each storage service is resolvable from your Azure virtual machine, find the DNS that's used by the virtual machine for resolution. + + 1. Set your logic app's **WEBSITE_DNS_SERVER** app setting to the DNS, and confirm that the DNS works successfully. + + 1. Confirm that VNet integration is set up correctly with appropriate virtual network and subnet in your Standard logic app. + + 1. 
If you use [private Azure DNS zones](../dns/private-dns-privatednszone.md) for your storage account's private endpoint services, check that a [virtual network link](../dns/private-dns-virtual-network-links.md) has been created to your logic app's integrated virtual network. + +For more information, review [Deploy Standard logic app to a storage account behind a firewall using service or private endpoints](https://techcommunity.microsoft.com/t5/integrations-on-azure-blog/deploying-standard-logic-app-to-storage-account-behind-firewall/ba-p/2626286). ## Next steps diff --git a/articles/logic-apps/logic-apps-limits-and-config.md b/articles/logic-apps/logic-apps-limits-and-config.md index e626fa6b9695..35e5d1b7673a 100644 --- a/articles/logic-apps/logic-apps-limits-and-config.md +++ b/articles/logic-apps/logic-apps-limits-and-config.md @@ -657,7 +657,7 @@ This section lists the outbound IP addresses that Azure Logic Apps requires in y | West Europe | 40.68.222.65, 40.68.209.23, 13.95.147.65, 23.97.218.130, 51.144.182.201, 23.97.211.179, 104.45.9.52, 23.97.210.126, 13.69.71.160, 13.69.71.161, 13.69.71.162, 13.69.71.163, 13.69.71.164, 13.69.71.165, 13.69.71.166, 13.69.71.167, 20.103.21.81, 20.103.17.247, 20.103.17.223, 20.103.16.47, 20.103.58.116, 20.103.57.29, 20.101.174.49, 20.101.174.23, 20.93.236.26, 20.93.235.107, 20.103.94.250, 20.76.174.72, 20.82.87.192, 20.82.87.16, 20.76.170.145, 20.103.91.39, 20.103.84.41, 20.76.161.156 | | West India | 104.211.164.80, 104.211.162.205, 104.211.164.136, 104.211.158.127, 104.211.156.153, 104.211.158.123, 104.211.154.59, 104.211.154.7 | | West US | 52.160.92.112, 40.118.244.241, 40.118.241.243, 157.56.162.53, 157.56.167.147, 104.42.49.145, 40.83.164.80, 104.42.38.32, 13.86.223.0, 13.86.223.1, 13.86.223.2, 13.86.223.3, 13.86.223.4, 13.86.223.5, 104.40.34.169, 104.40.32.148, 52.160.70.221, 52.160.70.105, 13.91.81.221, 13.64.231.196, 13.87.204.182, 40.78.65.193, 13.87.207.39, 104.42.44.28, 40.83.134.97, 40.78.65.112, 168.62.9.74, 168.62.28.191 | -| West US 2 | 13.66.210.167, 52.183.30.169, 52.183.29.132, 13.66.210.167, 13.66.201.169, 13.77.149.159, 52.175.198.132, 13.66.246.219, 20.99.189.158, 20.99.189.70, 20.72.244.58, 20.72.243.225 | +| West US 2 | 13.66.210.167, 52.183.30.169, 52.183.29.132, 13.66.201.169, 13.77.149.159, 52.175.198.132, 13.66.246.219, 20.99.189.158, 20.99.189.70, 20.72.244.58, 20.72.243.225 | | West US 3 | 20.150.181.32, 20.150.181.33, 20.150.181.34, 20.150.181.35, 20.150.181.36, 20.150.181.37, 20.150.181.38, 20.150.173.192, 20.106.85.228, 20.150.159.163, 20.106.116.207, 20.106.116.186 | ||| diff --git a/articles/logic-apps/logic-apps-securing-a-logic-app.md b/articles/logic-apps/logic-apps-securing-a-logic-app.md index fafcb6bbfa1e..086bdf02a90b 100644 --- a/articles/logic-apps/logic-apps-securing-a-logic-app.md +++ b/articles/logic-apps/logic-apps-securing-a-logic-app.md @@ -35,8 +35,7 @@ For more information about security in Azure, review these topics: ## Access to logic app operations -For Consumption logic apps only, before you can create or manage logic apps and their connections, you need specific permissions, which are provided through roles using [Azure role-based access control (Azure RBAC)](../role-based-access-control/role-assignments-portal.md). You can also -you can set up permissions so that only specific users or groups can run specific tasks, such as managing, editing, and viewing logic apps. To control their permissions, you can assign built-in or customized roles to members who have access to your Azure subscription. 
Azure Logic Apps has the following specific roles: +For Consumption logic apps only, before you can create or manage logic apps and their connections, you need specific permissions, which are provided through roles using [Azure role-based access control (Azure RBAC)](../role-based-access-control/role-assignments-portal.md). You can also set up permissions so that only specific users or groups can run specific tasks, such as managing, editing, and viewing logic apps. To control their permissions, you can assign built-in or customized roles to members who have access to your Azure subscription. Azure Logic Apps has the following specific roles: * [Logic App Contributor](../role-based-access-control/built-in-roles.md#logic-app-contributor): Lets you manage logic apps, but you can't change access to them. diff --git a/articles/logic-apps/logic-apps-workflow-actions-triggers.md b/articles/logic-apps/logic-apps-workflow-actions-triggers.md index 12b1ed86f15c..a9fe6f453c98 100644 --- a/articles/logic-apps/logic-apps-workflow-actions-triggers.md +++ b/articles/logic-apps/logic-apps-workflow-actions-triggers.md @@ -1029,7 +1029,7 @@ This action definition merges `abcdefg ` with a trailing space and the value `12 }, ``` -Here is the output that this action creates: +Here's the output that this action creates: `abcdefg 1234` @@ -1045,7 +1045,7 @@ This action definition merges a string variable that contains `abcdefg` and an i }, ``` -Here is the output that this action creates: +Here's the output that this action creates: `"abcdefg1234"` @@ -1053,7 +1053,7 @@ Here is the output that this action creates: ### Execute JavaScript Code action -This action runs a JavaScript code snippet and returns the results through a `Result` token that later actions can reference. +This action runs a JavaScript code snippet and returns the results through a token that subsequent actions in the workflow can reference. ```json "Execute_JavaScript_Code": { @@ -1061,7 +1061,7 @@ This action runs a JavaScript code snippet and returns the results through a `Re "inputs": { "code": "", "explicitDependencies": { - "actions": [ ], + "actions": [ ], "includeTrigger": true } }, @@ -1073,26 +1073,23 @@ This action runs a JavaScript code snippet and returns the results through a `Re | Value | Type | Description | |-------|------|-------------| -| <*JavaScript-code-snippet*> | Varies | The JavaScript code that you want to run. For code requirements and more information, see [Add and run code snippets with inline code](../logic-apps/logic-apps-add-run-inline-code.md).

    In the `code` attribute, your code snippet can use the read-only `workflowContext` object as input. This object has subproperties that give your code access to the results from the trigger and previous actions in your workflow. For more information about the `workflowContext` object, see [Reference trigger and action results in your code](../logic-apps/logic-apps-add-run-inline-code.md#workflowcontext). | +| <*JavaScript-code-snippet*> | Varies | The JavaScript code that you want to run. For code requirements and more information, see [Run code snippets in workflows](logic-apps-add-run-inline-code.md).

    In the `code` attribute, your code snippet can use the read-only `workflowContext` object as input. This object has subproperties that give your code access to the outputs from the trigger and any preceding actions in your workflow. For more information about the `workflowContext` object, see [Reference trigger and action results using the workflowContext object](logic-apps-add-run-inline-code.md#workflowcontext). | |||| *Required in some cases* -The `explicitDependencies` attribute specifies that you want to explicitly -include results from the trigger, previous actions, or both as dependencies -for your code snippet. For more information about adding these dependencies, see -[Add parameters for inline code](../logic-apps/logic-apps-add-run-inline-code.md#add-parameters). +The `explicitDependencies` attribute specifies that you want to explicitly include results from the trigger, previous actions, or both as dependencies for your code snippet. For more information about adding these dependencies, see [Add dependencies as parameters to an Inline Code action](logic-apps-add-run-inline-code.md#add-parameters). For the `includeTrigger` attribute, you can specify `true` or `false` values. | Value | Type | Description | |-------|------|-------------| -| <*previous-actions*> | String array | An array with your specified action names. Use the action names that appear in your workflow definition where action names use underscores (_), not spaces (" "). | +| <*preceding-actions*> | String array | An array with the action names in JSON format as dependencies. Make sure to use the action names that appear in your workflow definition where action names use underscores (**_**), not spaces (**" "**). | |||| *Example 1* -This action runs code that gets your logic app's name and returns the text "Hello world from \" as the result. In this example, the code references the workflow's name by accessing the `workflowContext.workflow.name` property through the read-only `workflowContext` object. For more information about using the `workflowContext` object, see [Reference trigger and action results in your code](../logic-apps/logic-apps-add-run-inline-code.md#workflowcontext). +This action runs code that gets your logic app workflow's name and returns the text "Hello world from \" as the result. In this example, the code references the workflow's name by accessing the `workflowContext.workflow.name` property through the read-only `workflowContext` object. For more information about using the `workflowContext` object, see [Reference trigger and action results in your code](../logic-apps/logic-apps-add-run-inline-code.md#workflowcontext). ```json "Execute_JavaScript_Code": { @@ -1106,18 +1103,18 @@ This action runs code that gets your logic app's name and returns the text "Hell *Example 2* -This action runs code in a logic app that triggers when a new email arrives in a work or school account. The logic app also uses a send approval email action that forwards the content from the received email along with a request for approval. +This action runs code in a logic app workflow that triggers when a new email arrives in an Outlook account. The workflow also uses the Office 365 Outlook **Send approval email** action that forwards the content from the received email along with a request for approval. -The code extracts the email addresses from the trigger's `Body` property and returns the addresses along with the `SelectedOption` property value from the approval action. 
The action explicitly includes the send approval email action as a dependency in the `explicitDependencies` > `actions` attribute. +The code extracts the email addresses from the email message's `Body` property, and returns the addresses along with the `SelectedOption` property value from the approval action. The action explicitly includes the **Send approval email** action as a dependency in the `actions` object inside the `explicitDependencies` object. ```json "Execute_JavaScript_Code": { "type": "JavaScriptCode", "inputs": { - "code": "var re = /(([^<>()\\[\\]\\\\.,;:\\s@\"]+(\\.[^<>()\\[\\]\\\\.,;:\\s@\"]+)*)|(\".+\"))@((\\[[0-9]{1,3}\\.[0-9]{1,3}\\.[0-9]{1,3}\\.[0-9]{1,3}])|(([a-zA-Z\\-0-9]+\\.)+[a-zA-Z]{2,}))/g;\r\n\r\nvar email = workflowContext.trigger.outputs.body.Body;\r\n\r\nvar reply = workflowContext.actions.Send_approval_email_.outputs.body.SelectedOption;\r\n\r\nreturn email.match(re) + \" - \" + reply;\r\n;", + "code": "var myResult = /(([^<>()\\[\\]\\\\.,;:\\s@\"]+(\\.[^<>()\\[\\]\\\\.,;:\\s@\"]+)*)|(\".+\"))@((\\[[0-9]{1,3}\\.[0-9]{1,3}\\.[0-9]{1,3}\\.[0-9]{1,3}])|(([a-zA-Z\\-0-9]+\\.)+[a-zA-Z]{2,}))/g;\r\n\r\nvar email = workflowContext.trigger.outputs.body.Body;\r\n\r\nvar reply = workflowContext.actions.Send_approval_email.outputs.body.SelectedOption;\r\n\r\nreturn email.match(myResult) + \" - \" + reply;\r\n;", "explicitDependencies": { "actions": [ - "Send_approval_email_" + "Send_approval_email" ] } }, @@ -1125,8 +1122,6 @@ The code extracts the email addresses from the trigger's `Body` property and ret } ``` - - ### Function action @@ -1153,7 +1148,7 @@ This action calls a previously created [Azure function](../azure-functions/funct | Value | Type | Description | |-------|------|-------------| -| <*Azure-function-ID*> | String | The resource ID for the Azure function you want to call. Here is the format for this value:

    "/subscriptions/<*Azure-subscription-ID*>/resourceGroups/<*Azure-resource-group*>/providers/Microsoft.Web/sites/<*Azure-function-app-name*>/functions/<*Azure-function-name*>" | +| <*Azure-function-ID*> | String | The resource ID for the Azure function you want to call. Here's the format for this value:

    "/subscriptions/<*Azure-subscription-ID*>/resourceGroups/<*Azure-resource-group*>/providers/Microsoft.Web/sites/<*Azure-function-app-name*>/functions/<*Azure-function-name*>" | | <*method-type*> | String | The HTTP method to use for calling the function: "GET", "PUT", "POST", "PATCH", or "DELETE"

    If not specified, the default is the "POST" method. | |||| @@ -1569,7 +1564,7 @@ This action definition creates a JSON object array from an integer array. The ac }, ``` -Here is the array that this action creates: +Here's the array that this action creates: `[ { "number": 1 }, { "number": 2 }, { "number": 3 } ]` @@ -1676,7 +1671,7 @@ This action definition creates a CSV table from the "myItemArray" variable. The } ``` -Here is the CSV table that this action creates: +Here's the CSV table that this action creates: ``` ID,Product_Name @@ -1699,7 +1694,7 @@ This action definition creates an HTML table from the "myItemArray" variable. Th } ``` -Here is the HTML table that this action creates: +Here's the HTML table that this action creates:
| ID | Product_Name |
|----|--------------|
| 0 | Apples |
| 1 | Oranges |
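The **Create table** action definition that generates this output isn't visible in this excerpt. A minimal sketch, assuming the same `myItemArray` variable used by the CSV example above, might look like this:

```json
"Create_HTML_table": {
   "type": "Table",
   "inputs": {
      "format": "HTML",
      "from": "@variables('myItemArray')"
   },
   "runAfter": {}
}
```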
    @@ -1728,7 +1723,7 @@ This action definition creates an HTML table from the "myItemArray" variable. Ho }, ``` -Here is the HTML table that this action creates: +Here's the HTML table that this action creates:
| Stock_ID | Description |
|----------|-------------|
| 0 | Organic Apples |
| 1 | Organic Oranges |
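Likewise, the definition behind this customized table isn't shown in this excerpt. A sketch under the same assumption (the `myItemArray` variable), using the `columns` property to set custom headers and values, might look like the following; the `@concat(...)` expression is illustrative only.

```json
"Create_HTML_table": {
   "type": "Table",
   "inputs": {
      "format": "HTML",
      "from": "@variables('myItemArray')",
      "columns": [
         {
            "header": "Stock_ID",
            "value": "@item().ID"
         },
         {
            "header": "Description",
            "value": "@concat('Organic ', item().Product_Name)"
         }
      ]
   },
   "runAfter": {}
}
```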
    @@ -2447,7 +2442,7 @@ Here are some considerations to review before you enable concurrency on a trigge * To work around this possibility, add a timeout to any action that might hold up these runs. If you're working in the code editor, see [Change asynchronous duration](#asynchronous-limits). Otherwise, if you're using the designer, follow these steps: - 1. In your logic app, on the action where you want to add a timeout, in the upper-right corner, select the ellipses (**...**) button, and then select **Settings**. + 1. In your logic app workflow, select the action where you want to add a timeout. In the action's upper-right corner, select the ellipses (**...**) button, and then select **Settings**. ![Open action settings](./media/logic-apps-workflow-actions-triggers/action-settings.png) @@ -2505,7 +2500,7 @@ To change the default limit, you can use either the code view editor or Logic Ap In the underlying "for each" definition, add or update the `runtimeConfiguration.concurrency.repetitions` property, which can have a value that ranges from `1` and `50`. -Here is an example that limits concurrent runs to 10 iterations: +Here's an example that limits concurrent runs to 10 iterations: ```json "For_each" { diff --git a/articles/logic-apps/media/logic-apps-add-run-inline-code/add-action-json-name.png b/articles/logic-apps/media/logic-apps-add-run-inline-code/add-action-json-name.png new file mode 100644 index 000000000000..ce672e9c963f Binary files /dev/null and b/articles/logic-apps/media/logic-apps-add-run-inline-code/add-action-json-name.png differ diff --git a/articles/logic-apps/media/logic-apps-add-run-inline-code/add-action-parameter-consumption.png b/articles/logic-apps/media/logic-apps-add-run-inline-code/add-action-parameter-consumption.png new file mode 100644 index 000000000000..fe6879822b09 Binary files /dev/null and b/articles/logic-apps/media/logic-apps-add-run-inline-code/add-action-parameter-consumption.png differ diff --git a/articles/logic-apps/media/logic-apps-add-run-inline-code/add-action-parameter-standard.png b/articles/logic-apps/media/logic-apps-add-run-inline-code/add-action-parameter-standard.png new file mode 100644 index 000000000000..1643571d7eda Binary files /dev/null and b/articles/logic-apps/media/logic-apps-add-run-inline-code/add-action-parameter-standard.png differ diff --git a/articles/logic-apps/media/logic-apps-add-run-inline-code/add-action-parameter.png b/articles/logic-apps/media/logic-apps-add-run-inline-code/add-action-parameter.png deleted file mode 100644 index 7744b140c8a5..000000000000 Binary files a/articles/logic-apps/media/logic-apps-add-run-inline-code/add-action-parameter.png and /dev/null differ diff --git a/articles/logic-apps/media/logic-apps-add-run-inline-code/add-actions-parameter-code-snippet-consumption.png b/articles/logic-apps/media/logic-apps-add-run-inline-code/add-actions-parameter-code-snippet-consumption.png new file mode 100644 index 000000000000..6d79376d23f1 Binary files /dev/null and b/articles/logic-apps/media/logic-apps-add-run-inline-code/add-actions-parameter-code-snippet-consumption.png differ diff --git a/articles/logic-apps/media/logic-apps-add-run-inline-code/add-actions-parameter-code-snippet-standard.png b/articles/logic-apps/media/logic-apps-add-run-inline-code/add-actions-parameter-code-snippet-standard.png new file mode 100644 index 000000000000..54f85834d664 Binary files /dev/null and b/articles/logic-apps/media/logic-apps-add-run-inline-code/add-actions-parameter-code-snippet-standard.png differ diff 
--git a/articles/logic-apps/media/logic-apps-add-run-inline-code/add-actions-parameter.png b/articles/logic-apps/media/logic-apps-add-run-inline-code/add-actions-parameter.png new file mode 100644 index 000000000000..764c05e6f925 Binary files /dev/null and b/articles/logic-apps/media/logic-apps-add-run-inline-code/add-actions-parameter.png differ diff --git a/articles/logic-apps/media/logic-apps-add-run-inline-code/add-new-step.png b/articles/logic-apps/media/logic-apps-add-run-inline-code/add-new-step.png deleted file mode 100644 index 182a3379afa2..000000000000 Binary files a/articles/logic-apps/media/logic-apps-add-run-inline-code/add-new-step.png and /dev/null differ diff --git a/articles/logic-apps/media/logic-apps-add-run-inline-code/auto-complete-consumption.png b/articles/logic-apps/media/logic-apps-add-run-inline-code/auto-complete-consumption.png new file mode 100644 index 000000000000..6cf0803a7cab Binary files /dev/null and b/articles/logic-apps/media/logic-apps-add-run-inline-code/auto-complete-consumption.png differ diff --git a/articles/logic-apps/media/logic-apps-add-run-inline-code/auto-complete-standard.png b/articles/logic-apps/media/logic-apps-add-run-inline-code/auto-complete-standard.png new file mode 100644 index 000000000000..2677fd02668c Binary files /dev/null and b/articles/logic-apps/media/logic-apps-add-run-inline-code/auto-complete-standard.png differ diff --git a/articles/logic-apps/media/logic-apps-add-run-inline-code/auto-complete.png b/articles/logic-apps/media/logic-apps-add-run-inline-code/auto-complete.png deleted file mode 100644 index de2dc0e7ad69..000000000000 Binary files a/articles/logic-apps/media/logic-apps-add-run-inline-code/auto-complete.png and /dev/null differ diff --git a/articles/logic-apps/media/logic-apps-add-run-inline-code/find-action-name-json.png b/articles/logic-apps/media/logic-apps-add-run-inline-code/find-action-name-json.png index 5bb315d5ca91..7bea820d73e8 100644 Binary files a/articles/logic-apps/media/logic-apps-add-run-inline-code/find-action-name-json.png and b/articles/logic-apps/media/logic-apps-add-run-inline-code/find-action-name-json.png differ diff --git a/articles/logic-apps/media/logic-apps-add-run-inline-code/inline-code-action-add-parameters.png b/articles/logic-apps/media/logic-apps-add-run-inline-code/inline-code-action-add-parameters.png deleted file mode 100644 index 5814d5cbf7e1..000000000000 Binary files a/articles/logic-apps/media/logic-apps-add-run-inline-code/inline-code-action-add-parameters.png and /dev/null differ diff --git a/articles/logic-apps/media/logic-apps-add-run-inline-code/inline-code-action-default-consumption.png b/articles/logic-apps/media/logic-apps-add-run-inline-code/inline-code-action-default-consumption.png new file mode 100644 index 000000000000..6e8aacf63c18 Binary files /dev/null and b/articles/logic-apps/media/logic-apps-add-run-inline-code/inline-code-action-default-consumption.png differ diff --git a/articles/logic-apps/media/logic-apps-add-run-inline-code/inline-code-action-default.png b/articles/logic-apps/media/logic-apps-add-run-inline-code/inline-code-action-default.png deleted file mode 100644 index ae3e9c9c7833..000000000000 Binary files a/articles/logic-apps/media/logic-apps-add-run-inline-code/inline-code-action-default.png and /dev/null differ diff --git a/articles/logic-apps/media/logic-apps-add-run-inline-code/inline-code-complete-example-consumption.png b/articles/logic-apps/media/logic-apps-add-run-inline-code/inline-code-complete-example-consumption.png new file 
mode 100644 index 000000000000..6f68949ac143 Binary files /dev/null and b/articles/logic-apps/media/logic-apps-add-run-inline-code/inline-code-complete-example-consumption.png differ diff --git a/articles/logic-apps/media/logic-apps-add-run-inline-code/inline-code-complete-example-standard.png b/articles/logic-apps/media/logic-apps-add-run-inline-code/inline-code-complete-example-standard.png new file mode 100644 index 000000000000..28f5763c4ec5 Binary files /dev/null and b/articles/logic-apps/media/logic-apps-add-run-inline-code/inline-code-complete-example-standard.png differ diff --git a/articles/logic-apps/media/logic-apps-add-run-inline-code/inline-code-complete-example.png b/articles/logic-apps/media/logic-apps-add-run-inline-code/inline-code-complete-example.png deleted file mode 100644 index 1ce50d2ead4a..000000000000 Binary files a/articles/logic-apps/media/logic-apps-add-run-inline-code/inline-code-complete-example.png and /dev/null differ diff --git a/articles/logic-apps/media/logic-apps-add-run-inline-code/inline-code-example-overview.png b/articles/logic-apps/media/logic-apps-add-run-inline-code/inline-code-example-overview.png deleted file mode 100644 index ced1c22de669..000000000000 Binary files a/articles/logic-apps/media/logic-apps-add-run-inline-code/inline-code-example-overview.png and /dev/null differ diff --git a/articles/logic-apps/media/logic-apps-add-run-inline-code/inline-code-example-select-outputs.png b/articles/logic-apps/media/logic-apps-add-run-inline-code/inline-code-example-select-outputs.png deleted file mode 100644 index 5db17a4723ca..000000000000 Binary files a/articles/logic-apps/media/logic-apps-add-run-inline-code/inline-code-example-select-outputs.png and /dev/null differ diff --git a/articles/logic-apps/media/logic-apps-add-run-inline-code/inline-code-overview-consumption.png b/articles/logic-apps/media/logic-apps-add-run-inline-code/inline-code-overview-consumption.png new file mode 100644 index 000000000000..d9e9baf03e41 Binary files /dev/null and b/articles/logic-apps/media/logic-apps-add-run-inline-code/inline-code-overview-consumption.png differ diff --git a/articles/logic-apps/media/logic-apps-add-run-inline-code/inline-code-overview-standard.png b/articles/logic-apps/media/logic-apps-add-run-inline-code/inline-code-overview-standard.png new file mode 100644 index 000000000000..96f6beea6118 Binary files /dev/null and b/articles/logic-apps/media/logic-apps-add-run-inline-code/inline-code-overview-standard.png differ diff --git a/articles/logic-apps/media/logic-apps-add-run-inline-code/rename-body-property-consumption.png b/articles/logic-apps/media/logic-apps-add-run-inline-code/rename-body-property-consumption.png new file mode 100644 index 000000000000..a1fc165198dc Binary files /dev/null and b/articles/logic-apps/media/logic-apps-add-run-inline-code/rename-body-property-consumption.png differ diff --git a/articles/logic-apps/media/logic-apps-add-run-inline-code/rename-body-property-standard.png b/articles/logic-apps/media/logic-apps-add-run-inline-code/rename-body-property-standard.png new file mode 100644 index 000000000000..0a3918e2419f Binary files /dev/null and b/articles/logic-apps/media/logic-apps-add-run-inline-code/rename-body-property-standard.png differ diff --git a/articles/logic-apps/media/logic-apps-add-run-inline-code/return-statement-standard.png b/articles/logic-apps/media/logic-apps-add-run-inline-code/return-statement-standard.png new file mode 100644 index 000000000000..55fb406847e8 Binary files /dev/null and 
b/articles/logic-apps/media/logic-apps-add-run-inline-code/return-statement-standard.png differ diff --git a/articles/logic-apps/media/logic-apps-add-run-inline-code/save-email-body-variable-consumption.png b/articles/logic-apps/media/logic-apps-add-run-inline-code/save-email-body-variable-consumption.png new file mode 100644 index 000000000000..080e9157b5ae Binary files /dev/null and b/articles/logic-apps/media/logic-apps-add-run-inline-code/save-email-body-variable-consumption.png differ diff --git a/articles/logic-apps/media/logic-apps-add-run-inline-code/save-email-body-variable-standard.png b/articles/logic-apps/media/logic-apps-add-run-inline-code/save-email-body-variable-standard.png new file mode 100644 index 000000000000..afcf6be0e25d Binary files /dev/null and b/articles/logic-apps/media/logic-apps-add-run-inline-code/save-email-body-variable-standard.png differ diff --git a/articles/logic-apps/media/logic-apps-add-run-inline-code/save-email-body-variable.png b/articles/logic-apps/media/logic-apps-add-run-inline-code/save-email-body-variable.png deleted file mode 100644 index f5dc468f22fd..000000000000 Binary files a/articles/logic-apps/media/logic-apps-add-run-inline-code/save-email-body-variable.png and /dev/null differ diff --git a/articles/logic-apps/media/logic-apps-add-run-inline-code/select-inline-code-action-consumption.png b/articles/logic-apps/media/logic-apps-add-run-inline-code/select-inline-code-action-consumption.png new file mode 100644 index 000000000000..22a1864257d0 Binary files /dev/null and b/articles/logic-apps/media/logic-apps-add-run-inline-code/select-inline-code-action-consumption.png differ diff --git a/articles/logic-apps/media/logic-apps-add-run-inline-code/select-inline-code-action-standard.png b/articles/logic-apps/media/logic-apps-add-run-inline-code/select-inline-code-action-standard.png new file mode 100644 index 000000000000..2686633eff76 Binary files /dev/null and b/articles/logic-apps/media/logic-apps-add-run-inline-code/select-inline-code-action-standard.png differ diff --git a/articles/logic-apps/media/logic-apps-add-run-inline-code/select-inline-code-action.png b/articles/logic-apps/media/logic-apps-add-run-inline-code/select-inline-code-action.png deleted file mode 100644 index 421a1a22b089..000000000000 Binary files a/articles/logic-apps/media/logic-apps-add-run-inline-code/select-inline-code-action.png and /dev/null differ diff --git a/articles/logic-apps/media/logic-apps-add-run-inline-code/select-output-consumption.png b/articles/logic-apps/media/logic-apps-add-run-inline-code/select-output-consumption.png new file mode 100644 index 000000000000..54550e3adf33 Binary files /dev/null and b/articles/logic-apps/media/logic-apps-add-run-inline-code/select-output-consumption.png differ diff --git a/articles/logic-apps/media/logic-apps-add-run-inline-code/select-output-standard.png b/articles/logic-apps/media/logic-apps-add-run-inline-code/select-output-standard.png new file mode 100644 index 000000000000..71f22e56ec0e Binary files /dev/null and b/articles/logic-apps/media/logic-apps-add-run-inline-code/select-output-standard.png differ diff --git a/articles/logic-apps/media/logic-apps-diagnosing-failures/failed-action-inputs-consumption.png b/articles/logic-apps/media/logic-apps-diagnosing-failures/failed-action-inputs-consumption.png new file mode 100644 index 000000000000..788b3b11d1e7 Binary files /dev/null and b/articles/logic-apps/media/logic-apps-diagnosing-failures/failed-action-inputs-consumption.png differ diff --git 
a/articles/logic-apps/media/logic-apps-diagnosing-failures/failed-action-inputs-standard.png b/articles/logic-apps/media/logic-apps-diagnosing-failures/failed-action-inputs-standard.png new file mode 100644 index 000000000000..87af8a0a6a7a Binary files /dev/null and b/articles/logic-apps/media/logic-apps-diagnosing-failures/failed-action-inputs-standard.png differ diff --git a/articles/logic-apps/media/logic-apps-diagnosing-failures/failed-action-outputs-consumption.png b/articles/logic-apps/media/logic-apps-diagnosing-failures/failed-action-outputs-consumption.png new file mode 100644 index 000000000000..dea74dfa1b95 Binary files /dev/null and b/articles/logic-apps/media/logic-apps-diagnosing-failures/failed-action-outputs-consumption.png differ diff --git a/articles/logic-apps/media/logic-apps-diagnosing-failures/failed-action-outputs-standard.png b/articles/logic-apps/media/logic-apps-diagnosing-failures/failed-action-outputs-standard.png new file mode 100644 index 000000000000..c58e877eac20 Binary files /dev/null and b/articles/logic-apps/media/logic-apps-diagnosing-failures/failed-action-outputs-standard.png differ diff --git a/articles/logic-apps/media/logic-apps-diagnosing-failures/logic-app-run-details-expanded.png b/articles/logic-apps/media/logic-apps-diagnosing-failures/logic-app-run-details-expanded.png deleted file mode 100644 index 78fa81e7b4dd..000000000000 Binary files a/articles/logic-apps/media/logic-apps-diagnosing-failures/logic-app-run-details-expanded.png and /dev/null differ diff --git a/articles/logic-apps/media/logic-apps-diagnosing-failures/logic-app-run-details.png b/articles/logic-apps/media/logic-apps-diagnosing-failures/logic-app-run-details.png deleted file mode 100644 index daca9cdb50a4..000000000000 Binary files a/articles/logic-apps/media/logic-apps-diagnosing-failures/logic-app-run-details.png and /dev/null differ diff --git a/articles/logic-apps/media/logic-apps-diagnosing-failures/logic-app-run-pane-consumption.png b/articles/logic-apps/media/logic-apps-diagnosing-failures/logic-app-run-pane-consumption.png new file mode 100644 index 000000000000..7260005f09c2 Binary files /dev/null and b/articles/logic-apps/media/logic-apps-diagnosing-failures/logic-app-run-pane-consumption.png differ diff --git a/articles/logic-apps/media/logic-apps-diagnosing-failures/logic-app-run-pane-standard.png b/articles/logic-apps/media/logic-apps-diagnosing-failures/logic-app-run-pane-standard.png new file mode 100644 index 000000000000..4e99e836d88e Binary files /dev/null and b/articles/logic-apps/media/logic-apps-diagnosing-failures/logic-app-run-pane-standard.png differ diff --git a/articles/logic-apps/media/logic-apps-diagnosing-failures/logic-app-run-pane.png b/articles/logic-apps/media/logic-apps-diagnosing-failures/logic-app-run-pane.png deleted file mode 100644 index e93e1b359d61..000000000000 Binary files a/articles/logic-apps/media/logic-apps-diagnosing-failures/logic-app-run-pane.png and /dev/null differ diff --git a/articles/logic-apps/media/logic-apps-diagnosing-failures/logic-app-runs-history-consumption.png b/articles/logic-apps/media/logic-apps-diagnosing-failures/logic-app-runs-history-consumption.png new file mode 100644 index 000000000000..9e12bb0bd4e6 Binary files /dev/null and b/articles/logic-apps/media/logic-apps-diagnosing-failures/logic-app-runs-history-consumption.png differ diff --git a/articles/logic-apps/media/logic-apps-diagnosing-failures/logic-app-runs-history-overview.png 
b/articles/logic-apps/media/logic-apps-diagnosing-failures/logic-app-runs-history-overview.png deleted file mode 100644 index 09d893604cda..000000000000 Binary files a/articles/logic-apps/media/logic-apps-diagnosing-failures/logic-app-runs-history-overview.png and /dev/null differ diff --git a/articles/logic-apps/media/logic-apps-diagnosing-failures/logic-app-runs-history-standard.png b/articles/logic-apps/media/logic-apps-diagnosing-failures/logic-app-runs-history-standard.png new file mode 100644 index 000000000000..656520e1e51c Binary files /dev/null and b/articles/logic-apps/media/logic-apps-diagnosing-failures/logic-app-runs-history-standard.png differ diff --git a/articles/logic-apps/media/logic-apps-diagnosing-failures/logic-app-runs-history.png b/articles/logic-apps/media/logic-apps-diagnosing-failures/logic-app-runs-history.png deleted file mode 100644 index 1aaee087605a..000000000000 Binary files a/articles/logic-apps/media/logic-apps-diagnosing-failures/logic-app-runs-history.png and /dev/null differ diff --git a/articles/logic-apps/media/logic-apps-diagnosing-failures/logic-app-trigger-history-consumption.png b/articles/logic-apps/media/logic-apps-diagnosing-failures/logic-app-trigger-history-consumption.png new file mode 100644 index 000000000000..2c466eadd711 Binary files /dev/null and b/articles/logic-apps/media/logic-apps-diagnosing-failures/logic-app-trigger-history-consumption.png differ diff --git a/articles/logic-apps/media/logic-apps-diagnosing-failures/logic-app-trigger-history-overview.png b/articles/logic-apps/media/logic-apps-diagnosing-failures/logic-app-trigger-history-overview.png deleted file mode 100644 index 22e18d78c9ad..000000000000 Binary files a/articles/logic-apps/media/logic-apps-diagnosing-failures/logic-app-trigger-history-overview.png and /dev/null differ diff --git a/articles/logic-apps/media/logic-apps-diagnosing-failures/logic-app-trigger-history-standard.png b/articles/logic-apps/media/logic-apps-diagnosing-failures/logic-app-trigger-history-standard.png new file mode 100644 index 000000000000..f865aace7515 Binary files /dev/null and b/articles/logic-apps/media/logic-apps-diagnosing-failures/logic-app-trigger-history-standard.png differ diff --git a/articles/logic-apps/media/logic-apps-diagnosing-failures/logic-app-trigger-history.png b/articles/logic-apps/media/logic-apps-diagnosing-failures/logic-app-trigger-history.png deleted file mode 100644 index 5a138066a020..000000000000 Binary files a/articles/logic-apps/media/logic-apps-diagnosing-failures/logic-app-trigger-history.png and /dev/null differ diff --git a/articles/logic-apps/media/logic-apps-diagnosing-failures/review-action-outputs-for-errors.png b/articles/logic-apps/media/logic-apps-diagnosing-failures/review-action-outputs-for-errors.png deleted file mode 100644 index 8c40cb97723b..000000000000 Binary files a/articles/logic-apps/media/logic-apps-diagnosing-failures/review-action-outputs-for-errors.png and /dev/null differ diff --git a/articles/logic-apps/media/logic-apps-diagnosing-failures/review-trigger-inputs-consumption.png b/articles/logic-apps/media/logic-apps-diagnosing-failures/review-trigger-inputs-consumption.png new file mode 100644 index 000000000000..8ed5412ebeec Binary files /dev/null and b/articles/logic-apps/media/logic-apps-diagnosing-failures/review-trigger-inputs-consumption.png differ diff --git a/articles/logic-apps/media/logic-apps-diagnosing-failures/review-trigger-inputs-for-errors.png 
b/articles/logic-apps/media/logic-apps-diagnosing-failures/review-trigger-inputs-for-errors.png deleted file mode 100644 index 6890035e4c37..000000000000 Binary files a/articles/logic-apps/media/logic-apps-diagnosing-failures/review-trigger-inputs-for-errors.png and /dev/null differ diff --git a/articles/logic-apps/media/logic-apps-diagnosing-failures/review-trigger-inputs-standard.png b/articles/logic-apps/media/logic-apps-diagnosing-failures/review-trigger-inputs-standard.png new file mode 100644 index 000000000000..d080b74c2f9f Binary files /dev/null and b/articles/logic-apps/media/logic-apps-diagnosing-failures/review-trigger-inputs-standard.png differ diff --git a/articles/logic-apps/media/logic-apps-diagnosing-failures/review-trigger-outputs-consumption.png b/articles/logic-apps/media/logic-apps-diagnosing-failures/review-trigger-outputs-consumption.png new file mode 100644 index 000000000000..1359acf972a5 Binary files /dev/null and b/articles/logic-apps/media/logic-apps-diagnosing-failures/review-trigger-outputs-consumption.png differ diff --git a/articles/logic-apps/media/logic-apps-diagnosing-failures/review-trigger-outputs-for-errors.png b/articles/logic-apps/media/logic-apps-diagnosing-failures/review-trigger-outputs-for-errors.png deleted file mode 100644 index 8f169f15e81c..000000000000 Binary files a/articles/logic-apps/media/logic-apps-diagnosing-failures/review-trigger-outputs-for-errors.png and /dev/null differ diff --git a/articles/logic-apps/media/logic-apps-diagnosing-failures/review-trigger-outputs-standard.png b/articles/logic-apps/media/logic-apps-diagnosing-failures/review-trigger-outputs-standard.png new file mode 100644 index 000000000000..7a13739ead67 Binary files /dev/null and b/articles/logic-apps/media/logic-apps-diagnosing-failures/review-trigger-outputs-standard.png differ diff --git a/articles/logic-apps/media/monitor-logic-apps/add-condition-for-rule.png b/articles/logic-apps/media/monitor-logic-apps/add-condition-for-rule.png deleted file mode 100644 index c32d254fcc49..000000000000 Binary files a/articles/logic-apps/media/monitor-logic-apps/add-condition-for-rule.png and /dev/null differ diff --git a/articles/logic-apps/media/monitor-logic-apps/add-new-alert-rule.png b/articles/logic-apps/media/monitor-logic-apps/add-new-alert-rule.png index 432dd1d8c5db..7022cc952f0c 100644 Binary files a/articles/logic-apps/media/monitor-logic-apps/add-new-alert-rule.png and b/articles/logic-apps/media/monitor-logic-apps/add-new-alert-rule.png differ diff --git a/articles/logic-apps/media/monitor-logic-apps/failed-action-inputs-standard.png b/articles/logic-apps/media/monitor-logic-apps/failed-action-inputs-standard.png new file mode 100644 index 000000000000..87af8a0a6a7a Binary files /dev/null and b/articles/logic-apps/media/monitor-logic-apps/failed-action-inputs-standard.png differ diff --git a/articles/logic-apps/media/monitor-logic-apps/failed-action-outputs-standard.png b/articles/logic-apps/media/monitor-logic-apps/failed-action-outputs-standard.png new file mode 100644 index 000000000000..c58e877eac20 Binary files /dev/null and b/articles/logic-apps/media/monitor-logic-apps/failed-action-outputs-standard.png differ diff --git a/articles/logic-apps/media/monitor-logic-apps/find-and-select-signal.png b/articles/logic-apps/media/monitor-logic-apps/find-and-select-signal.png index cba53e56ba0f..65b3c8df1e3d 100644 Binary files a/articles/logic-apps/media/monitor-logic-apps/find-and-select-signal.png and 
b/articles/logic-apps/media/monitor-logic-apps/find-and-select-signal.png differ diff --git a/articles/logic-apps/media/monitor-logic-apps/find-your-logic-app.png b/articles/logic-apps/media/monitor-logic-apps/find-your-logic-app.png index 633f1cce290c..0e27a587f9b3 100644 Binary files a/articles/logic-apps/media/monitor-logic-apps/find-your-logic-app.png and b/articles/logic-apps/media/monitor-logic-apps/find-your-logic-app.png differ diff --git a/articles/logic-apps/media/monitor-logic-apps/finished-alert-condition-cost.png b/articles/logic-apps/media/monitor-logic-apps/finished-alert-condition-cost.png index b3373115be20..eff43e819d8a 100644 Binary files a/articles/logic-apps/media/monitor-logic-apps/finished-alert-condition-cost.png and b/articles/logic-apps/media/monitor-logic-apps/finished-alert-condition-cost.png differ diff --git a/articles/logic-apps/media/monitor-logic-apps/logic-app-run-pane-consumption.png b/articles/logic-apps/media/monitor-logic-apps/logic-app-run-pane-consumption.png new file mode 100644 index 000000000000..3b88a64d5a4f Binary files /dev/null and b/articles/logic-apps/media/monitor-logic-apps/logic-app-run-pane-consumption.png differ diff --git a/articles/logic-apps/media/monitor-logic-apps/logic-app-run-pane-standard.png b/articles/logic-apps/media/monitor-logic-apps/logic-app-run-pane-standard.png new file mode 100644 index 000000000000..4e99e836d88e Binary files /dev/null and b/articles/logic-apps/media/monitor-logic-apps/logic-app-run-pane-standard.png differ diff --git a/articles/logic-apps/media/monitor-logic-apps/logic-app-run-pane.png b/articles/logic-apps/media/monitor-logic-apps/logic-app-run-pane.png deleted file mode 100644 index 3f63a20e5e8b..000000000000 Binary files a/articles/logic-apps/media/monitor-logic-apps/logic-app-run-pane.png and /dev/null differ diff --git a/articles/logic-apps/media/monitor-logic-apps/logic-app-trigger-history.png b/articles/logic-apps/media/monitor-logic-apps/logic-app-trigger-history.png deleted file mode 100644 index 8e4ae719d474..000000000000 Binary files a/articles/logic-apps/media/monitor-logic-apps/logic-app-trigger-history.png and /dev/null differ diff --git a/articles/logic-apps/media/monitor-logic-apps/logic-app-triggers-history-consumption.png b/articles/logic-apps/media/monitor-logic-apps/logic-app-triggers-history-consumption.png new file mode 100644 index 000000000000..2cee657d6487 Binary files /dev/null and b/articles/logic-apps/media/monitor-logic-apps/logic-app-triggers-history-consumption.png differ diff --git a/articles/logic-apps/media/monitor-logic-apps/logic-app-triggers-history-standard.png b/articles/logic-apps/media/monitor-logic-apps/logic-app-triggers-history-standard.png new file mode 100644 index 000000000000..c22dec807464 Binary files /dev/null and b/articles/logic-apps/media/monitor-logic-apps/logic-app-triggers-history-standard.png differ diff --git a/articles/logic-apps/media/monitor-logic-apps/logic-apps-list-in-subscription.png b/articles/logic-apps/media/monitor-logic-apps/logic-apps-list-in-subscription.png index ac7a8a1048b5..17898041fe06 100644 Binary files a/articles/logic-apps/media/monitor-logic-apps/logic-apps-list-in-subscription.png and b/articles/logic-apps/media/monitor-logic-apps/logic-apps-list-in-subscription.png differ diff --git a/articles/logic-apps/media/monitor-logic-apps/overview-logic-app-runs-history-consumption.png b/articles/logic-apps/media/monitor-logic-apps/overview-logic-app-runs-history-consumption.png new file mode 100644 index 
000000000000..cbdd0c7e7ee0 Binary files /dev/null and b/articles/logic-apps/media/monitor-logic-apps/overview-logic-app-runs-history-consumption.png differ diff --git a/articles/logic-apps/media/monitor-logic-apps/overview-logic-app-runs-history-standard.png b/articles/logic-apps/media/monitor-logic-apps/overview-logic-app-runs-history-standard.png new file mode 100644 index 000000000000..fc258c6197c4 Binary files /dev/null and b/articles/logic-apps/media/monitor-logic-apps/overview-logic-app-runs-history-standard.png differ diff --git a/articles/logic-apps/media/monitor-logic-apps/overview-logic-app-trigger-history-consumption.png b/articles/logic-apps/media/monitor-logic-apps/overview-logic-app-trigger-history-consumption.png new file mode 100644 index 000000000000..2d30e8fa0481 Binary files /dev/null and b/articles/logic-apps/media/monitor-logic-apps/overview-logic-app-trigger-history-consumption.png differ diff --git a/articles/logic-apps/media/monitor-logic-apps/overview-logic-app-trigger-history-standard.png b/articles/logic-apps/media/monitor-logic-apps/overview-logic-app-trigger-history-standard.png new file mode 100644 index 000000000000..468f2da252ce Binary files /dev/null and b/articles/logic-apps/media/monitor-logic-apps/overview-logic-app-trigger-history-standard.png differ diff --git a/articles/logic-apps/media/monitor-logic-apps/overview-pane-logic-app-details-run-history.png b/articles/logic-apps/media/monitor-logic-apps/overview-pane-logic-app-details-run-history.png deleted file mode 100644 index 98684b199e85..000000000000 Binary files a/articles/logic-apps/media/monitor-logic-apps/overview-pane-logic-app-details-run-history.png and /dev/null differ diff --git a/articles/logic-apps/media/monitor-logic-apps/overview-pane-logic-app-details-trigger-history.png b/articles/logic-apps/media/monitor-logic-apps/overview-pane-logic-app-details-trigger-history.png deleted file mode 100644 index 8e366b7653c0..000000000000 Binary files a/articles/logic-apps/media/monitor-logic-apps/overview-pane-logic-app-details-trigger-history.png and /dev/null differ diff --git a/articles/logic-apps/media/monitor-logic-apps/review-logic-app-run-details.png b/articles/logic-apps/media/monitor-logic-apps/review-logic-app-run-details.png index cfe155edd104..5f6e9439f720 100644 Binary files a/articles/logic-apps/media/monitor-logic-apps/review-logic-app-run-details.png and b/articles/logic-apps/media/monitor-logic-apps/review-logic-app-run-details.png differ diff --git a/articles/logic-apps/media/monitor-logic-apps/review-trigger-inputs-standard.png b/articles/logic-apps/media/monitor-logic-apps/review-trigger-inputs-standard.png new file mode 100644 index 000000000000..70e59a93ef02 Binary files /dev/null and b/articles/logic-apps/media/monitor-logic-apps/review-trigger-inputs-standard.png differ diff --git a/articles/logic-apps/media/monitor-logic-apps/review-trigger-outputs-standard.png b/articles/logic-apps/media/monitor-logic-apps/review-trigger-outputs-standard.png new file mode 100644 index 000000000000..15a5f0397c04 Binary files /dev/null and b/articles/logic-apps/media/monitor-logic-apps/review-trigger-outputs-standard.png differ diff --git a/articles/logic-apps/media/monitor-logic-apps/select-failed-step-in-failed-run.png b/articles/logic-apps/media/monitor-logic-apps/select-failed-step-in-failed-run.png deleted file mode 100644 index b5dce6ad1631..000000000000 Binary files a/articles/logic-apps/media/monitor-logic-apps/select-failed-step-in-failed-run.png and /dev/null differ diff --git 
a/articles/logic-apps/media/monitor-logic-apps/select-failed-step.png b/articles/logic-apps/media/monitor-logic-apps/select-failed-step.png new file mode 100644 index 000000000000..8e7329f18aa7 Binary files /dev/null and b/articles/logic-apps/media/monitor-logic-apps/select-failed-step.png differ diff --git a/articles/logic-apps/media/monitor-logic-apps/select-run-details-on-toolbar.png b/articles/logic-apps/media/monitor-logic-apps/select-run-details-on-toolbar.png deleted file mode 100644 index 54f89cbe33bf..000000000000 Binary files a/articles/logic-apps/media/monitor-logic-apps/select-run-details-on-toolbar.png and /dev/null differ diff --git a/articles/logic-apps/media/monitor-logic-apps/select-specific-logic-app-run-consumption.png b/articles/logic-apps/media/monitor-logic-apps/select-specific-logic-app-run-consumption.png new file mode 100644 index 000000000000..e9fe8fdbe24e Binary files /dev/null and b/articles/logic-apps/media/monitor-logic-apps/select-specific-logic-app-run-consumption.png differ diff --git a/articles/logic-apps/media/monitor-logic-apps/select-specific-logic-app-run-standard.png b/articles/logic-apps/media/monitor-logic-apps/select-specific-logic-app-run-standard.png new file mode 100644 index 000000000000..8c3247716b5d Binary files /dev/null and b/articles/logic-apps/media/monitor-logic-apps/select-specific-logic-app-run-standard.png differ diff --git a/articles/logic-apps/media/monitor-logic-apps/select-specific-logic-app-run.png b/articles/logic-apps/media/monitor-logic-apps/select-specific-logic-app-run.png deleted file mode 100644 index b2e07b620a1d..000000000000 Binary files a/articles/logic-apps/media/monitor-logic-apps/select-specific-logic-app-run.png and /dev/null differ diff --git a/articles/logic-apps/media/monitor-logic-apps/select-trigger-event-for-review-standard.png b/articles/logic-apps/media/monitor-logic-apps/select-trigger-event-for-review-standard.png new file mode 100644 index 000000000000..338440b650a0 Binary files /dev/null and b/articles/logic-apps/media/monitor-logic-apps/select-trigger-event-for-review-standard.png differ diff --git a/articles/logic-apps/media/monitor-logic-apps/select-trigger-event-for-review.png b/articles/logic-apps/media/monitor-logic-apps/select-trigger-event-for-review.png index 0ff61cf98587..8da7ba5bd147 100644 Binary files a/articles/logic-apps/media/monitor-logic-apps/select-trigger-event-for-review.png and b/articles/logic-apps/media/monitor-logic-apps/select-trigger-event-for-review.png differ diff --git a/articles/logic-apps/media/monitor-logic-apps/set-up-condition-for-alert.png b/articles/logic-apps/media/monitor-logic-apps/set-up-condition-for-alert.png index 67cdba0bc661..16a5e1170b21 100644 Binary files a/articles/logic-apps/media/monitor-logic-apps/set-up-condition-for-alert.png and b/articles/logic-apps/media/monitor-logic-apps/set-up-condition-for-alert.png differ diff --git a/articles/logic-apps/media/monitor-logic-apps/toolbar-select-run-details.png b/articles/logic-apps/media/monitor-logic-apps/toolbar-select-run-details.png new file mode 100644 index 000000000000..7e2423eb269c Binary files /dev/null and b/articles/logic-apps/media/monitor-logic-apps/toolbar-select-run-details.png differ diff --git a/articles/logic-apps/media/monitor-logic-apps/view-specific-trigger-details.png b/articles/logic-apps/media/monitor-logic-apps/view-specific-trigger-details.png index c697f4571a5c..e523ce128b01 100644 Binary files a/articles/logic-apps/media/monitor-logic-apps/view-specific-trigger-details.png and 
b/articles/logic-apps/media/monitor-logic-apps/view-specific-trigger-details.png differ diff --git a/articles/logic-apps/monitor-logic-apps.md b/articles/logic-apps/monitor-logic-apps.md index 33143057c9b7..81e537327bb2 100644 --- a/articles/logic-apps/monitor-logic-apps.md +++ b/articles/logic-apps/monitor-logic-apps.md @@ -3,9 +3,9 @@ title: Monitor status, view history, and set up alerts description: Troubleshoot logic apps by checking run status, reviewing trigger history, and enabling alerts in Azure Logic Apps. services: logic-apps ms.suite: integration -ms.reviewer: divswa, azla +ms.reviewer: estfan, azla ms.topic: how-to -ms.date: 05/04/2020 +ms.date: 05/24/2022 --- # Monitor run status, review trigger history, and set up alerts for Azure Logic Apps @@ -15,198 +15,319 @@ ms.date: 05/04/2020 > review the following sections in [Create an integration workflow with single-tenant Azure Logic Apps](create-single-tenant-workflows-azure-portal.md): > [Review run history](create-single-tenant-workflows-azure-portal.md#review-run-history), [Review trigger history](create-single-tenant-workflows-azure-portal.md#review-trigger-history), and [Enable or open Application Insights after deployment](create-single-tenant-workflows-azure-portal.md#enable-open-application-insights). -After you create and run a [Consumption logic app workflow](quickstart-create-first-logic-app-workflow.md), you can check that workflow's run status, [runs history](#review-runs-history), [trigger history](#review-trigger-history), and performance. To get notifications about failures or other possible problems, set up [alerts](#add-azure-alerts). For example, you can create an alert that detects "when more than five runs fail in an hour." +After you create and run a [Consumption logic app workflow](quickstart-create-first-logic-app-workflow.md), you can check that workflow's run status, [trigger history](#review-trigger-history), [runs history](#review-runs-history), and performance. To get notifications about failures or other possible problems, set up [alerts](#add-azure-alerts). For example, you can create an alert that detects "when more than five runs fail in an hour." -For real-time event monitoring and richer debugging, set up diagnostics logging for your logic app by using [Azure Monitor logs](../azure-monitor/overview.md). This Azure service helps you monitor your cloud and on-premises environments so that you can more easily maintain their availability and performance. You can then find and view events, such as trigger events, run events, and action events. By storing this information in [Azure Monitor logs](../azure-monitor/logs/data-platform-logs.md), you can create [log queries](../azure-monitor/logs/log-query-overview.md) that help you find and analyze this information. You can also use this diagnostic data with other Azure services, such as Azure Storage and Azure Event Hubs. For more information, see [Monitor logic apps by using Azure Monitor](../logic-apps/monitor-logic-apps-log-analytics.md). +For real-time event monitoring and richer debugging, set up diagnostics logging for your logic app by using [Azure Monitor logs](../azure-monitor/overview.md). This Azure service helps you monitor your cloud and on-premises environments so that you can more easily maintain their availability and performance. You can then find and view events, such as trigger events, run events, and action events. 
By storing this information in [Azure Monitor logs](../azure-monitor/logs/data-platform-logs.md), you can create [log queries](../azure-monitor/logs/log-query-overview.md) that help you find and analyze this information. You can also use this diagnostic data with other Azure services, such as Azure Storage and Azure Event Hubs. For more information, see [Monitor logic apps by using Azure Monitor](monitor-logic-apps-log-analytics.md). > [!NOTE] -> If your logic apps run in an [integration service environment (ISE)](../logic-apps/connect-virtual-network-vnet-isolated-environment-overview.md) -> that was created to use an [internal access endpoint](../logic-apps/connect-virtual-network-vnet-isolated-environment-overview.md#endpoint-access), -> you can view and access inputs and outputs from logic app's runs history *only from inside your virtual network*. Make sure that you have network +> If your logic apps run in an [integration service environment (ISE)](connect-virtual-network-vnet-isolated-environment-overview.md) +> that was created to use an [internal access endpoint](connect-virtual-network-vnet-isolated-environment-overview.md#endpoint-access), +> you can view and access inputs and outputs from a workflow's run history *only from inside your virtual network*. Make sure that you have network > connectivity between the private endpoints and the computer from where you want to access runs history. For example, your client computer can exist > inside the ISE's virtual network or inside a virtual network that's connected to the ISE's virtual network, for example, through peering or a virtual -> private network. For more information, see [ISE endpoint access](../logic-apps/connect-virtual-network-vnet-isolated-environment-overview.md#endpoint-access). +> private network. For more information, see [ISE endpoint access](connect-virtual-network-vnet-isolated-environment-overview.md#endpoint-access). - + + +## Review trigger history -## Review runs history +Each workflow run starts with a trigger, which either fires on a schedule or waits for an incoming request or event. The trigger history lists all the trigger attempts that your logic app made and information about the inputs and outputs for each trigger attempt. -Each time that the trigger fires for an item or event, the Logic Apps engine creates and runs a separate workflow instance for each item or event. By default, each workflow instance runs in parallel so that no workflow has to wait before starting a run. You can review what happened during that run, including the status for each step in the workflow plus the inputs and outputs for each step. +### [Consumption](#tab/consumption) 1. In the [Azure portal](https://portal.azure.com), find and open your logic app workflow in the designer. - To find your logic app, in the main Azure search box, enter `logic apps`, and then select **Logic apps**. + To find your logic app, in the portal search box, enter **logic apps**, and then select **Logic apps**. - ![Find and select "Logic Apps" service](./media/monitor-logic-apps/find-your-logic-app.png) + ![Screenshot showing the Azure portal main search box with "logic apps" entered and "Logic apps" selected.](./media/monitor-logic-apps/find-your-logic-app.png) - The Azure portal shows all the logic apps that are associated with your Azure subscriptions. You can filter this list based on name, subscription, resource group, location, and so on. + The Azure portal shows all the logic apps in your Azure subscription. 
You can filter this list based on name, subscription, resource group, location, and so on. + + ![Screenshot showing the Azure portal with all logic apps associated with selected Azure subscriptions.](./media/monitor-logic-apps/logic-apps-list-in-subscription.png) + +1. Select your logic app. On your logic app's menu, select **Overview**. On the Overview pane, select **Trigger history**. - ![View logic apps associated with subscriptions](./media/monitor-logic-apps/logic-apps-list-in-subscription.png) + ![Screenshot showing "Overview" pane for a Consumption logic app workflow with "Trigger history" selected.](./media/monitor-logic-apps/overview-logic-app-trigger-history-consumption.png) -1. Select your logic app, and then select **Overview**. + Under **Trigger history**, all trigger attempts appear. Each time the trigger successfully fires, Azure Logic Apps creates an individual workflow instance and runs that instance. By default, each instance runs in parallel so that no workflow has to wait before starting a run. If your workflow triggers for multiple events or items at the same time, a trigger entry appears for each item with the same date and time. - On the overview pane, under **Runs history**, all the past, current, and any waiting runs for your logic app appear. If the list shows many runs, and you can't find the entry that you want, try filtering the list. + ![Screenshot showing "Overview" pane for a Consumption logic app workflow with multiple trigger attempts for different items.](./media/monitor-logic-apps/logic-app-triggers-history-consumption.png) + + The following table lists the possible trigger statuses: + + | Trigger status | Description | + |----------------|-------------| + | **Failed** | An error occurred. To review any generated error messages for a failed trigger, select that trigger attempt and choose **Outputs**. For example, you might find inputs that aren't valid. | + | **Skipped** | The trigger checked the endpoint but found no data that met the specified criteria. | + | **Succeeded** | The trigger checked the endpoint and found available data. Usually, a **Fired** status also appears alongside this status. If not, the trigger definition might have a condition or `SplitOn` command that wasn't met.

    This status can apply to a manual trigger, recurrence-based trigger, or polling trigger. A trigger can run successfully, but the run itself might still fail when the actions generate unhandled errors. | + ||| > [!TIP] - > If the run status doesn't appear, try refreshing the overview page by selecting **Refresh**. - > No run happens for a trigger that's skipped due to unmet criteria or finding no data. + > + > You can recheck the trigger without waiting for the next recurrence. On the + > **Overview** pane toolbar or on the designer toolbar, select **Run Trigger** > **Run**. + +1. To view information about a specific trigger attempt, select that trigger event. + + ![Screenshot showing the Consumption workflow trigger entry selected.](./media/monitor-logic-apps/select-trigger-event-for-review.png) + + If the list shows many trigger attempts, and you can't find the entry that you want, try filtering the list. If you don't find the data that you expect, try selecting **Refresh** on the toolbar. + + You can now review information about the selected trigger event, for example: + + ![Screenshot showing the selected Consumption workflow trigger history information.](./media/monitor-logic-apps/view-specific-trigger-details.png) + +### [Standard](#tab/standard) + +1. In the [Azure portal](https://portal.azure.com), find and open your logic app workflow in the designer. + + To find your logic app, in the portal search box, enter **logic apps**, and then select **Logic apps**. - ![Overview, runs history, and other logic app information](./media/monitor-logic-apps/overview-pane-logic-app-details-run-history.png) + ![Screenshot showing the Azure portal search box with "logic apps" entered and "Logic apps" selected.](./media/monitor-logic-apps/find-your-logic-app.png) - Here are the possible run statuses: + The Azure portal shows all the logic apps in your Azure subscription. You can filter this list based on name, subscription, resource group, location, and so on. + + ![Screenshot showing Azure portal with all logic apps associated with selected Azure subscriptions.](./media/monitor-logic-apps/logic-apps-list-in-subscription.png) + +1. Select your logic app. On your logic app's menu, select **Overview**. On the Overview pane, select **Trigger history**. + + ![Screenshot showing Overview pane with "Trigger history" selected.](./media/monitor-logic-apps/overview-logic-app-trigger-history-standard.png) + + Under **Trigger history**, all trigger attempts appear. Each time the trigger successfully fires, Azure Logic Apps creates an individual workflow instance and runs that instance. By default, each instance runs in parallel so that no workflow has to wait before starting a run. If your workflow triggers for multiple events or items at the same time, a trigger entry appears for each item with the same date and time. + + ![Screenshot showing Overview pane with multiple trigger attempts for different items.](./media/monitor-logic-apps/logic-app-triggers-history-standard.png) + + The following table lists the possible trigger statuses: + + | Trigger status | Description | + |----------------|-------------| + | **Failed** | An error occurred. To review any generated error messages for a failed trigger, select that trigger attempt and choose **Outputs**. For example, you might find inputs that aren't valid. | + | **Skipped** | The trigger checked the endpoint but found no data that met the specified criteria. | + | **Succeeded** | The trigger checked the endpoint and found available data. 
Usually, a **Fired** status also appears alongside this status. If not, the trigger definition might have a condition or `SplitOn` command that wasn't met.

    This status can apply to a manual trigger, recurrence-based trigger, or polling trigger. A trigger can run successfully, but the run itself might still fail when the actions generate unhandled errors. | + ||| + + > [!TIP] + > + > You can recheck the trigger without waiting for the next recurrence. On the + > **Overview** pane toolbar, select **Run Trigger** > **Run**. + +1. To view information about a specific trigger attempt, select that trigger event. + + ![Screenshot showing a Standard workflow trigger entry selected.](./media/monitor-logic-apps/select-trigger-event-for-review-standard.png) + + If the list shows many trigger attempts, and you can't find the entry that you want, try filtering the list. If you don't find the data that you expect, try selecting **Refresh** on the toolbar. + +1. Check the trigger's inputs to confirm that they appear as you expect. On the **History** pane, under **Inputs link**, select the link, which shows the **Inputs** pane. + + ![Screenshot showing Standard logic app workflow trigger inputs.](./media/monitor-logic-apps/review-trigger-inputs-standard.png) + +1. Check the trigger's outputs, if any, to confirm that they appear as you expect. On the **History** pane, under **Outputs link**, select the link, which shows the **Outputs** pane. + + Trigger outputs include the data that the trigger passes to the next step in your workflow. Reviewing these outputs can help you determine whether the correct or expected values passed on to the next step in your workflow. + + For example, the RSS trigger generated an error message that states that the RSS feed wasn't found. + + ![Screenshot showing Standard logic app workflow trigger outputs.](./media/logic-apps-diagnosing-failures/review-trigger-outputs-standard.png) + +--- + + + +## Review workflow run history + +Each time the trigger successfully fires, Azure Logic Apps creates a workflow instance and runs that instance. By default, each instance runs in parallel so that no workflow has to wait before starting a run. You can review what happened during each run, including the status, inputs, and outputs for each step in the workflow. + +### [Consumption](#tab/consumption) + +1. In the [Azure portal](https://portal.azure.com), find and open your logic app workflow in the designer. + + To find your logic app, in the main Azure search box, enter **logic apps**, and then select **Logic apps**. + + ![Screenshot showing Azure portal main search box with "logic apps" entered and "Logic apps" selected.](./media/monitor-logic-apps/find-your-logic-app.png) + + The Azure portal shows all the logic apps that are associated with your Azure subscriptions. You can filter this list based on name, subscription, resource group, location, and so on. + + ![Screenshot showing all the logic apps in selected Azure subscriptions.](./media/monitor-logic-apps/logic-apps-list-in-subscription.png) + +1. Select your logic app. On your logic app's menu, select **Overview**. On the Overview pane, select **Runs history**. + + Under **Runs history**, all the past, current, and any waiting runs appear. If the trigger fires for multiple events or items at the same time, an entry appears for each item with the same date and time. 
+ + ![Screenshot showing Consumption logic app workflow "Overview" pane with "Runs history" selected.](./media/monitor-logic-apps/overview-logic-app-runs-history-consumption.png) + + The following table lists the possible run statuses: | Run status | Description | |------------|-------------| | **Aborted** | The run stopped or didn't finish due to external problems, for example, a system outage or lapsed Azure subscription. | - | **Cancelled** | The run was triggered and started but received a cancellation request. | + | **Cancelled** | The run was triggered and started, but received a cancellation request. | | **Failed** | At least one action in the run failed. No subsequent actions in the workflow were set up to handle the failure. | - | **Running** | The run was triggered and is in progress, but this status can also appear for a run that is throttled due to [action limits](logic-apps-limits-and-config.md) or the [current pricing plan](https://azure.microsoft.com/pricing/details/logic-apps/).

    **Tip**: If you set up [diagnostics logging](monitor-logic-apps-log-analytics.md), you can get information about any throttle events that happen. | + | **Running** | The run was triggered and is in progress. However, this status can also appear for a run that's throttled due to [action limits](logic-apps-limits-and-config.md) or the [current pricing plan](https://azure.microsoft.com/pricing/details/logic-apps/).

    **Tip**: If you set up [diagnostics logging](monitor-logic-apps-log-analytics.md), you can get information about any throttle events that happen. | | **Succeeded** | The run succeeded. If any action failed, a subsequent action in the workflow handled that failure. | - | **Timed out** | The run timed out because the current duration exceeded the run duration limit, which is controlled by the [**Run history retention in days** setting](logic-apps-limits-and-config.md#run-duration-retention-limits). A run's duration is calculated by using the run's start time and run duration limit at that start time.

    **Note**: If the run's duration also exceeds the current *run history retention limit*, which is also controlled by the [**Run history retention in days** setting](logic-apps-limits-and-config.md#run-duration-retention-limits), the run is cleared from the runs history by a daily cleanup job. Whether the run times out or completes, the retention period is always calculated by using the run's start time and *current* retention limit. So, if you reduce the duration limit for an in-flight run, the run times out. However, the run either stays or is cleared from the runs history based on whether the run's duration exceeded the retention limit. | + | **Timed out** | The run timed out because the current duration exceeded the run duration limit, which is controlled by the [**Run history retention in days** setting](logic-apps-limits-and-config.md#run-duration-retention-limits). A run's duration is calculated by using the run's start time and run duration limit at that start time.

    **Note**: If the run's duration also exceeds the current *run history retention limit*, which is also controlled by the [**Run history retention in days** setting](logic-apps-limits-and-config.md#run-duration-retention-limits), the run is cleared from the runs history by a daily cleanup job. Whether the run times out or completes, the retention period is always calculated by using the run's start time and *current* retention limit. So, if you reduce the duration limit for an in-flight run, the run times out. However, the run either stays or is cleared from the runs history based on whether the run's duration exceeded the retention limit. | | **Waiting** | The run hasn't started or is paused, for example, due to an earlier workflow instance that's still running. | ||| -1. To review the steps and other information for a specific run, under **Runs history**, select that run. +1. To review the steps and other information for a specific run, under **Runs history**, select that run. If the list shows many runs, and you can't find the entry that you want, try filtering the list. + + > [!TIP] + > + > If the run status doesn't appear, try refreshing the overview pane by selecting **Refresh**. + > No run happens for a trigger that's skipped due to unmet criteria or finding no data. - ![Select a specific run to review](./media/monitor-logic-apps/select-specific-logic-app-run.png) + ![Screenshot showing the Consumption logic app workflow run selected.](./media/monitor-logic-apps/select-specific-logic-app-run-consumption.png) The **Logic app run** pane shows each step in the selected run, each step's run status, and the time taken for each step to run, for example: - ![Each action in the specific run](./media/monitor-logic-apps/logic-app-run-pane.png) + ![Screenshot showing each action in the selected workflow run.](./media/monitor-logic-apps/logic-app-run-pane-consumption.png) To view this information in list form, on the **Logic app run** toolbar, select **Run Details**. - ![On the toolbar, select "Run Details"](./media/monitor-logic-apps/select-run-details-on-toolbar.png) + ![Screenshot showing the "Logic app run" toolbar with "Run Details" selected.](./media/monitor-logic-apps/toolbar-select-run-details.png) - The Run Details view shows each step, their status, and other information. + The Run Details lists each step, their status, and other information. - ![Review details about each step in the run](./media/monitor-logic-apps/review-logic-app-run-details.png) + ![Screenshot showing the run details for each step in the workflow.](./media/monitor-logic-apps/review-logic-app-run-details.png) For example, you can get the run's **Correlation ID** property, which you might need when you use the [REST API for Logic Apps](/rest/api/logic). 1. To get more information about a specific step, select either option: - * In the **Logic app run** pane select the step so that the shape expands. You can now view information such as inputs, outputs, and any errors that happened in that step, for example: + * In the **Logic app run** pane, select the step so that the shape expands. You can now view information such as inputs, outputs, and any errors that happened in that step. - ![In logic app run pane, view failed step](./media/monitor-logic-apps/specific-step-inputs-outputs-errors.png) + For example, suppose you had an action that failed, and you wanted to review which inputs might have caused that step to fail. 
By expanding the shape, you can view the inputs, outputs, and error for that step: - ![In logic app run pane, view failed step](./media/monitor-logic-apps/specific-step-inputs-outputs-errors.png) + ![Screenshot showing the "Logic app run" pane with the expanded shape for an example failed step.](./media/monitor-logic-apps/specific-step-inputs-outputs-errors.png) * In the **Logic app run details** pane, select the step that you want. - ![In run details pane, view failed step](./media/monitor-logic-apps/select-failed-step-in-failed-run.png) + ![Screenshot showing the "Logic app run details" pane with the example failed step selected.](./media/monitor-logic-apps/select-failed-step.png) - You can now view information such as inputs and outputs for that step, for example: > [!NOTE] - > All runtime details and events are encrypted within the Logic Apps service. - > They are decrypted only when a user requests to view that data. - > You can [hide inputs and outputs in run history](../logic-apps/logic-apps-securing-a-logic-app.md#obfuscate) + > + > All runtime details and events are encrypted within Azure Logic Apps and + > are decrypted only when a user requests to view that data. You can + > [hide inputs and outputs in run history](logic-apps-securing-a-logic-app.md#obfuscate) > or control user access to this information by using > [Azure role-based access control (Azure RBAC)](../role-based-access-control/overview.md). - - -## Review trigger history - -Each logic app run starts with a trigger. The trigger history lists all the trigger attempts that your logic app made and information about the inputs and outputs for each trigger attempt. +### [Standard](#tab/standard) 1. In the [Azure portal](https://portal.azure.com), find and open your logic app workflow in the designer. - To find your logic app, in the main Azure search box, enter `logic apps`, and then select **Logic Apps**. + To find your logic app, in the main Azure search box, enter **logic apps**, and then select **Logic apps**. - ![Find and select "Logic Apps" service](./media/monitor-logic-apps/find-your-logic-app.png) + ![Screenshot showing Azure portal search box with "logic apps" entered and "Logic apps" selected.](./media/monitor-logic-apps/find-your-logic-app.png) The Azure portal shows all the logic apps that are associated with your Azure subscriptions. You can filter this list based on name, subscription, resource group, location, and so on. - ![View logic apps associated with subscriptions](./media/monitor-logic-apps/logic-apps-list-in-subscription.png) + ![Screenshot showing all logic apps in selected Azure subscriptions.](./media/monitor-logic-apps/logic-apps-list-in-subscription.png) -1. Select your logic app, and then select **Overview**. +1. Select your logic app. On your logic app's menu, under **Workflows**, select **Workflows**, and then select your workflow. -1. On your logic app's menu, select **Overview**. In the **Summary** section, under **Evaluation**, select **See trigger history**. + > [!NOTE] + > + > By default, stateless workflows don't store run history unless you enable this capability for debugging. + > For more information, review [Stateful versus stateless workflows](single-tenant-overview-compare.md#stateful-stateless). - ![View trigger history for your logic app](./media/monitor-logic-apps/overview-pane-logic-app-details-trigger-history.png) +1. On your workflow's menu, select **Overview**. On the Overview pane, select **Run History**. - The trigger history pane shows all the trigger attempts that your logic app has made. 
Each time that the trigger fires for an item or event, the Logic Apps engine creates a separate logic app instance that runs the workflow. By default, each instance runs in parallel so that no workflow has to wait before starting a run. So if your logic app triggers on multiple items at the same time, a trigger entry with the same date and time appears for each item. + Under **Run History**, all the past, current, and any waiting runs appear. If the trigger fires for multiple events or items at the same time, an entry appears for each item with the same date and time. - ![Multiple trigger attempts for different items](./media/monitor-logic-apps/logic-app-trigger-history.png) + ![Screenshot showing Standard logic app workflow "Overview" pane with "Run History" selected.](./media/monitor-logic-apps/overview-logic-app-runs-history-standard.png) - Here are the possible trigger attempt statuses: + The following table lists the possible run statuses: - | Trigger status | Description | - |----------------|-------------| - | **Failed** | An error occurred. To review any generated error messages for a failed trigger, select that trigger attempt and choose **Outputs**. For example, you might find inputs that aren't valid. | - | **Skipped** | The trigger checked the endpoint but found no data that met the specified criteria. | - | **Succeeded** | The trigger checked the endpoint and found available data. Usually, a **Fired** status also appears alongside this status. If not, the trigger definition might have a condition or `SplitOn` command that wasn't met.

    This status can apply to a manual trigger, recurrence trigger, or polling trigger. A trigger can run successfully, but the run itself might still fail when the actions generate unhandled errors. | + | Run status | Description | + |------------|-------------| + | **Aborted** | The run stopped or didn't finish due to external problems, for example, a system outage or lapsed Azure subscription. | + | **Cancelled** | The run was triggered and started, but received a cancellation request. | + | **Failed** | At least one action in the run failed. No subsequent actions in the workflow were set up to handle the failure. | + | **Running** | The run was triggered and is in progress. However, this status can also appear for a run that's throttled due to [action limits](logic-apps-limits-and-config.md) or the [current pricing plan](https://azure.microsoft.com/pricing/details/logic-apps/).

    **Tip**: If you set up [diagnostics logging](monitor-logic-apps-log-analytics.md), you can get information about any throttle events that happen. | + | **Succeeded** | The run succeeded. If any action failed, a subsequent action in the workflow handled that failure. | + | **Timed out** | The run timed out because the current duration exceeded the run duration limit, which is controlled by the [**Run history retention in days** setting](logic-apps-limits-and-config.md#run-duration-retention-limits). A run's duration is calculated by using the run's start time and run duration limit at that start time.

    **Note**: If the run's duration also exceeds the current *run history retention limit*, which is also controlled by the [**Run history retention in days** setting](logic-apps-limits-and-config.md#run-duration-retention-limits), the run is cleared from the runs history by a daily cleanup job. Whether the run times out or completes, the retention period is always calculated by using the run's start time and *current* retention limit. So, if you reduce the duration limit for an in-flight run, the run times out. However, the run either stays or is cleared from the runs history based on whether the run's duration exceeded the retention limit. | + | **Waiting** | The run hasn't started or is paused, for example, due to an earlier workflow instance that's still running. | ||| - > [!TIP] - > You can recheck the trigger without waiting for the next recurrence. On the overview toolbar, select **Run Trigger**, - > and select the trigger, which forces a check. Or, select **Run Trigger** on designer toolbar. +1. To review the steps and other information for a specific run, under **Run History**, select that run. If the list shows many runs, and you can't find the entry that you want, try filtering the list. -1. To view information about a specific trigger attempt, on the trigger pane, select that trigger event. If the list shows many trigger attempts, and you can't find the entry that you want, try filtering the list. If you don't find the data that you expect, try selecting **Refresh** on the toolbar. + > [!TIP] + > + > If the run status doesn't appear, try refreshing the overview pane by selecting **Refresh**. + > No run happens for a trigger that's skipped due to unmet criteria or finding no data. - ![View specific trigger attempt](./media/monitor-logic-apps/select-trigger-event-for-review.png) + ![Screenshot showing the Standard workflow run selected.](./media/monitor-logic-apps/select-specific-logic-app-run-standard.png) - You can now review information about the selected trigger event, for example: + The workflow run pane shows each step in the selected run, each step's run status, and the time taken for each step to run, for example: - ![View specific trigger information](./media/monitor-logic-apps/view-specific-trigger-details.png) + ![Screenshot showing each action in selected workflow run.](./media/monitor-logic-apps/logic-app-run-pane-standard.png) - +1. After all the steps in the run appear, select each step to review more information such as inputs, outputs, and any errors that happened in that step. -## Set up monitoring alerts + For example, suppose you had an action that failed, and you wanted to review which inputs might have caused that step to fail. -To get alerts based on specific metrics or exceeded thresholds for your logic app, set up [alerts in Azure Monitor](../azure-monitor/alerts/alerts-overview.md). Learn about [metrics in Azure](../azure-monitor/data-platform.md). To set up alerts without using [Azure Monitor](../azure-monitor/logs/log-query-overview.md), follow these steps. + ![Screenshot showing Standard logic app workflow with failed step inputs.](./media/monitor-logic-apps/failed-action-inputs-standard.png) -1. On your logic app menu, under **Monitoring**, select **Alerts** > **New alert rule**. + The following screenshot shows the outputs from the failed step. 
- ![Add an alert for your logic app](./media/monitor-logic-apps/add-new-alert-rule.png) + ![Screenshot showing Standard logic app workflow with failed step outputs.](./media/monitor-logic-apps/failed-action-outputs-standard.png) -1. On the **Create rule** pane, under **Resource**, select your logic app, if not already selected. Under **Condition**, select **Add** so that you can define the condition that triggers the alert. + > [!NOTE] + > + > All runtime details and events are encrypted within Azure Logic Apps and + > are decrypted only when a user requests to view that data. You can + > [hide inputs and outputs in run history](logic-apps-securing-a-logic-app.md#obfuscate). - ![Add a condition for the rule](./media/monitor-logic-apps/add-condition-for-rule.png) +--- -1. On the **Configure signal logic** pane, find and select the signal for which you want to get an alert. You can use the search box, or to sort the signals alphabetically, select the **Signal name** column header. + - For example, if you want to send an alert when a trigger fails, follow these steps: +## Set up monitoring alerts - 1. In the **Signal name** column, find and select the **Triggers Failed** signal. +To get alerts based on specific metrics or exceeded thresholds for your logic app, set up [alerts in Azure Monitor](../azure-monitor/alerts/alerts-overview.md). For more information, review [Metrics in Azure](../azure-monitor/data-platform.md). To set up alerts without using [Azure Monitor](../azure-monitor/logs/log-query-overview.md), follow these steps. - ![Select signal for creating alert](./media/monitor-logic-apps/find-and-select-signal.png) +1. On your logic app menu, under **Monitoring**, select **Alerts**. On the toolbar, select **Create** > **Alert rule**. - 1. On the information pane that opens for the selected signal, under **Alert logic**, set up your condition, for example: + ![Screenshot showing Azure portal, logic app menu with "Alerts" selected, and toolbar with "Create", "Alert rule" selected.](./media/monitor-logic-apps/add-new-alert-rule.png) - 1. For **Operator**, select **Greater than or equal to**. +1. On the **Select a signal** pane, under **Signal type**, select the signal for which you want to get an alert. - 1. For **Aggregation type**, select **Count**. + > [!TIP] + > + > You can use the search box, or to sort the signals alphabetically, + > select the **Signal name** column header. - 1. For **Threshold value**, enter `1`. + For example, to send an alert when a trigger fails, follow these steps: - 1. Under **Condition preview**, confirm that your condition appears correct. + 1. In the **Signal name** column, find and select the **Triggers Failed** signal. - 1. Under **Evaluated based on**, set up the interval and frequency for running the alert rule. For **Aggregation granularity (Period)**, select the period for grouping the data. For **Frequency of evaluation**, select how often you want to check the condition. + ![Screenshot showing "Select a signal pane", the "Signal name" column, and "Triggers Failed" signal selected.](./media/monitor-logic-apps/find-and-select-signal.png) - 1. When you're ready, select **Done**. + 1. 
On the **Configure signal logic** pane, under **Alert logic**, set up your condition, and select **Done**, for example: - Here's the finished condition: + | Property | Example value | + |----------|---------------| + | **Operator** | **Greater than or equal to** | + | **Aggregation type** | **Count** | + | **Threshold value** | **1** | + | **Unit** | **Count** | + | **Condition preview** | **Whenever the count of triggers failed is greater than or equal to 1** | + | **Aggregation granularity (Period)** | **1 minute** | + | **Frequency of evaluation** | **Every 1 Minute** | + ||| - ![Set up condition for alert](./media/monitor-logic-apps/set-up-condition-for-alert.png) + For more information, review [Create, view, and manage log alerts by using Azure Monitor](../azure-monitor/alerts/alerts-activity-log.md). - The **Create rule** page now shows the condition that you created and the cost for running that alert. + The following screenshot shows the finished condition: - ![New alert on the "Create rule" page](./media/monitor-logic-apps/finished-alert-condition-cost.png) + ![Screenshot showing the condition for alert.](./media/monitor-logic-apps/set-up-condition-for-alert.png) -1. Specify a name, optional description, and severity level for your alert. Either leave the **Enable rule upon creation** setting turned on, or turn off until you're ready to enable the rule. + The **Create an alert rule** page now shows the condition that you created and the cost for running that alert. -1. When you're done, select **Create alert rule**. + ![Screenshot showing the new alert on the "Create an alert rule" page.](./media/monitor-logic-apps/finished-alert-condition-cost.png) -> [!TIP] -> To run a logic app from an alert, you can include the -> [request trigger](../connectors/connectors-native-reqres.md) in your workflow, -> which lets you perform tasks like these examples: -> -> * [Post to Slack](https://github.com/Azure/azure-quickstart-templates/tree/master/demos/alert-to-slack-with-logic-app) -> * [Send a text](https://github.com/Azure/azure-quickstart-templates/tree/master/demos/alert-to-text-message-with-logic-app) -> * [Add a message to a queue](https://github.com/Azure/azure-quickstart-templates/tree/master/demos/alert-to-queue-with-logic-app) +1. If you're satisfied, select **Next: Details** to finish creating the rule. ## Next steps -* [Monitor logic apps by using Azure Monitor](../logic-apps/monitor-logic-apps-log-analytics.md) +* [Monitor logic apps with Azure Monitor](monitor-logic-apps-log-analytics.md) diff --git a/articles/machine-learning/how-to-auto-train-image-models.md b/articles/machine-learning/how-to-auto-train-image-models.md index bb62031cf93c..d4782fd4c7dd 100644 --- a/articles/machine-learning/how-to-auto-train-image-models.md +++ b/articles/machine-learning/how-to-auto-train-image-models.md @@ -9,7 +9,7 @@ ms.service: machine-learning ms.subservice: automl ms.custom: event-tier1-build-2022 ms.topic: how-to -ms.date: 01/18/2022 +ms.date: 05/26/2022 #Customer intent: I'm a data scientist with ML knowledge in the computer vision space, looking to build ML models using image data in Azure Machine Learning with full control of the model algorithm, hyperparameters, and training and deployment environments. --- @@ -185,7 +185,7 @@ The following is a sample JSONL file for image classification: Once your data is in JSONL format, you can create training and validation `MLTable` as shown below. 
-:::code language="yaml" source="~/azureml-examples-sdk-preview/sdk/jobs/automl-standalone-jobs/automl-image-object-detection-task-fridge-items/data/training-mltable-folder/MLTable"::: +:::code language="yaml" source="~/azureml-examples-main/sdk/jobs/automl-standalone-jobs/automl-image-object-detection-task-fridge-items/data/training-mltable-folder/MLTable"::: Automated ML doesn't impose any constraints on training or validation data size for computer vision tasks. Maximum dataset size is only limited by the storage layer behind the dataset (i.e. blob store). There's no minimum number of images or labels. However, we recommend starting with a minimum of 10-15 samples per label to ensure the output model is sufficiently trained. The higher the total number of labels/classes, the more samples you need per label. @@ -211,7 +211,7 @@ validation_data: You can create data inputs from training and validation MLTable from your local directory or cloud storage with the following code: -[!Notebook-python[] (~/azureml-examples-sdk-preview/sdk/jobs/automl-standalone-jobs/automl-image-object-detection-task-fridge-items/automl-image-object-detection-task-fridge-items.ipynb?name=data-load)] +[!Notebook-python[] (~/azureml-examples-main/sdk/jobs/automl-standalone-jobs/automl-image-object-detection-task-fridge-items/automl-image-object-detection-task-fridge-items.ipynb?name=data-load)] Training data is a required parameter and is passed in using the `training_data` parameter of the task specific `automl` type function. You can optionally specify another MLTable as a validation data with the `validation_data` parameter. If no validation data is specified, 20% of your training data will be used for validation by default, unless you pass `validation_data_size` argument with a different value. @@ -329,7 +329,7 @@ limits: # [Python SDK v2 (preview)](#tab/SDK-v2) -[!Notebook-python[] (~/azureml-examples-sdk-preview/sdk/jobs/automl-standalone-jobs/automl-image-object-detection-task-fridge-items/automl-image-object-detection-task-fridge-items.ipynb?name=limit-settings)] +[!Notebook-python[] (~/azureml-examples-main/sdk/jobs/automl-standalone-jobs/automl-image-object-detection-task-fridge-items/automl-image-object-detection-task-fridge-items.ipynb?name=limit-settings)] --- @@ -404,7 +404,7 @@ sweep: # [Python SDK v2 (preview)](#tab/SDK-v2) -[!Notebook-python[] (~/azureml-examples-sdk-preview/sdk/jobs/automl-standalone-jobs/automl-image-object-detection-task-fridge-items/automl-image-object-detection-task-fridge-items.ipynb?name=sweep-settings)] +[!Notebook-python[] (~/azureml-examples-main/sdk/jobs/automl-standalone-jobs/automl-image-object-detection-task-fridge-items/automl-image-object-detection-task-fridge-items.ipynb?name=sweep-settings)] --- @@ -426,7 +426,7 @@ image_model: # [Python SDK v2 (preview)](#tab/SDK-v2) -[!Notebook-python[] (~/azureml-examples-sdk-preview/sdk/jobs/automl-standalone-jobs/automl-image-object-detection-task-fridge-items/automl-image-object-detection-task-fridge-items.ipynb?name=pass-arguments)] +[!Notebook-python[] (~/azureml-examples-main/sdk/jobs/automl-standalone-jobs/automl-image-object-detection-task-fridge-items/automl-image-object-detection-task-fridge-items.ipynb?name=pass-arguments)] --- @@ -514,7 +514,7 @@ az ml job create --file ./hello-automl-job-basic.yml --workspace-name [YOUR_AZUR When you've configured your AutoML Job to the desired settings, you can submit the job. 
-[!Notebook-python[] (~/azureml-examples-sdk-preview/sdk/jobs/automl-standalone-jobs/automl-image-object-detection-task-fridge-items/automl-image-object-detection-task-fridge-items.ipynb?name=submit-run)] +[!Notebook-python[] (~/azureml-examples-main/sdk/jobs/automl-standalone-jobs/automl-image-object-detection-task-fridge-items/automl-image-object-detection-task-fridge-items.ipynb?name=submit-run)] --- ## Outputs and evaluation metrics diff --git a/articles/machine-learning/how-to-configure-network-isolation-with-v2.md b/articles/machine-learning/how-to-configure-network-isolation-with-v2.md index 93787ec002e1..6b29c4b87776 100644 --- a/articles/machine-learning/how-to-configure-network-isolation-with-v2.md +++ b/articles/machine-learning/how-to-configure-network-isolation-with-v2.md @@ -44,7 +44,7 @@ The Azure Machine Learning CLI v2 uses our new v2 API platform. New features suc As mentioned in the previous section, there are two types of operations; with ARM and with the workspace. With the __legacy v1 API__, most operations used the workspace. With the v1 API, adding a private endpoint to the workspace provided network isolation for everything except CRUD operations on the workspace or compute resources. -With the __new v2 API__, most operations use ARM. So enabling a private endpoint on your workspace doesn't provide the same level of network isolation. Operations that use ARM communicate over public networks, and include any metadata (such as your resource IDs) or parameters used by the operation. For example, the [create or update job](/rest/api/azureml/jobs/create-or-update) api sends metadata, and [parameters](/azure/machine-learning/reference-yaml-job-command). +With the __new v2 API__, most operations use ARM. So enabling a private endpoint on your workspace doesn't provide the same level of network isolation. Operations that use ARM communicate over public networks, and include any metadata (such as your resource IDs) or parameters used by the operation. For example, the [create or update job](/rest/api/azureml/jobs/create-or-update) api sends metadata, and [parameters](./reference-yaml-job-command.md). > [!TIP] > * Public ARM operations do not surface data in your storage account on public networks. @@ -116,4 +116,4 @@ az ml workspace show -g -w --query v1LegacyMode ## Next steps * [Use a private endpoint with Azure Machine Learning workspace](how-to-configure-private-link.md). -* [Create private link for managing Azure resources](/azure/azure-resource-manager/management/create-private-link-access-portal). \ No newline at end of file +* [Create private link for managing Azure resources](../azure-resource-manager/management/create-private-link-access-portal.md). 
\ No newline at end of file diff --git a/articles/machine-learning/how-to-create-component-pipeline-python.md b/articles/machine-learning/how-to-create-component-pipeline-python.md index 216062419d5e..6024202ab41e 100644 --- a/articles/machine-learning/how-to-create-component-pipeline-python.md +++ b/articles/machine-learning/how-to-create-component-pipeline-python.md @@ -8,7 +8,7 @@ ms.subservice: mlops ms.topic: how-to author: likebupt ms.author: keli19 -ms.date: 05/10/2022 +ms.date: 05/26/2022 ms.custom: devx-track-python, sdkv2, event-tier1-build-2022 --- @@ -59,7 +59,7 @@ This article is based on the [image_classification_keras_minist_convnet.ipynb](h Import all the Azure Machine Learning required libraries that you'll need for this article: -[!notebook-python[] (~/azureml-examples-sdk-preview/sdk/jobs/pipelines/2e_image_classification_keras_minist_convnet/image_classification_keras_minist_convnet.ipynb?name=required-library)] +[!notebook-python[] (~/azureml-examples-main/sdk/jobs/pipelines/2e_image_classification_keras_minist_convnet/image_classification_keras_minist_convnet.ipynb?name=required-library)] ## Prepare input data for your pipeline job @@ -69,7 +69,7 @@ Fashion-MNIST is a dataset of fashion images divided into 10 classes. Each image To define the input data of a job that references the Web-based data, run: -[!notebook-python[] (~/azureml-examples-sdk-preview/sdk/jobs/pipelines/2e_image_classification_keras_minist_convnet/image_classification_keras_minist_convnet.ipynb?name=define-input)] +[!notebook-python[] (~/azureml-examples-main/sdk/jobs/pipelines/2e_image_classification_keras_minist_convnet/image_classification_keras_minist_convnet.ipynb?name=define-input)] By defining an `Input`, you create a reference to the data source location. The data remains in its existing location, so no extra storage cost is incurred. @@ -103,7 +103,7 @@ If you're following along with the example in the [AzureML Examples repo](https: By using command_component() function as a decorator, you can easily define the component's interface, metadata and code to execute from a python function. Each decorated Python function will be transformed into a single static specification (YAML) that the pipeline service can process. -:::code language="python" source="~/azureml-examples-sdk-preview/sdk/jobs/pipelines/2e_image_classification_keras_minist_convnet/prep/prep_component.py"::: +:::code language="python" source="~/azureml-examples-main/sdk/jobs/pipelines/2e_image_classification_keras_minist_convnet/prep/prep_component.py"::: The code above define a component with display name `Prep Data` using `@command_component` decorator: @@ -127,13 +127,13 @@ Following is what a component looks like in the studio UI. You'll need to modify the runtime environment in which your component runs. -:::code language="python" source="~/azureml-examples-sdk-preview/sdk/jobs/pipelines/2e_image_classification_keras_minist_convnet/prep/prep_component.py" range="5-10"::: +:::code language="python" source="~/azureml-examples-main/sdk/jobs/pipelines/2e_image_classification_keras_minist_convnet/prep/prep_component.py" range="5-10"::: The above code creates an object of `Environment` class, which represents the runtime environment in which the component runs. 
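If it helps to see the shape of that environment object outside the sample, the following minimal sketch builds a comparable runtime environment with the `azure.ai.ml` SDK v2. The environment name, description, and base Docker image below are illustrative assumptions, not values taken from the sample's `prep_component.py`:

```python
# Minimal sketch (assumed values): pair a conda specification with a base Docker image
# to describe the runtime environment that a command component runs in.
from azure.ai.ml.entities import Environment

prep_environment = Environment(
    name="prep-data-env",  # hypothetical name, not from the sample
    description="Runtime environment for the Prep Data component",
    conda_file="conda.yaml",  # conda spec listing the component's packages (shown next)
    image="mcr.microsoft.com/azureml/openmpi4.1.0-ubuntu20.04",  # assumed base image
)
```

An environment defined this way can then be referenced from the component definition, so every run provisions the same conda packages on the same base image.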
The `conda.yaml` file contains all packages used for the component like following: -:::code language="python" source="~/azureml-examples-sdk-preview/sdk/jobs/pipelines/2e_image_classification_keras_minist_convnet/prep/conda.yaml"::: +:::code language="python" source="~/azureml-examples-main/sdk/jobs/pipelines/2e_image_classification_keras_minist_convnet/prep/conda.yaml"::: Now, you've prepared all source files for the `Prep Data` component. @@ -159,7 +159,7 @@ The `train.py` file contains a normal python function, which performs the traini After defining the training function successfully, you can use @command_component in Azure Machine Learning SDK v2 to wrap your function as a component, which can be used in AML pipelines. -:::code language="python" source="~/azureml-examples-sdk-preview/sdk/jobs/pipelines/2e_image_classification_keras_minist_convnet/train/train_component.py"::: +:::code language="python" source="~/azureml-examples-main/sdk/jobs/pipelines/2e_image_classification_keras_minist_convnet/train/train_component.py"::: The code above define a component with display name `Train Image Classification Keras` using `@command_component`: @@ -169,7 +169,7 @@ The code above define a component with display name `Train Image Classification The train-model component has a slightly more complex configuration than the prep-data component. The `conda.yaml` is like following: -:::code language="yaml" source="~/azureml-examples-sdk-preview/sdk/jobs/pipelines/2e_image_classification_keras_minist_convnet/train/conda.yaml"::: +:::code language="yaml" source="~/azureml-examples-main/sdk/jobs/pipelines/2e_image_classification_keras_minist_convnet/train/conda.yaml"::: Now, you've prepared all source files for the `Train Image Classification Keras` component. @@ -187,7 +187,7 @@ If you're following along with the example in the [AzureML Examples repo](https: The `score.py` file contains a normal python function, which performs the training model logic. -:::code language="python" source="~/azureml-examples-sdk-preview/sdk/jobs/pipelines/2e_image_classification_keras_minist_convnet/score/score.py"::: +:::code language="python" source="~/azureml-examples-main/sdk/jobs/pipelines/2e_image_classification_keras_minist_convnet/score/score.py"::: The code in score.py takes three command-line arguments: `input_data`, `input_model` and `output_result`. The program score the input model using input data and then output the scoring result. @@ -200,7 +200,7 @@ In this section, you'll learn to create a component specification in the valid Y - Interface: inputs and outputs - Command, code, & environment: The command, code, and environment used to run the component -:::code language="python" source="~/azureml-examples-sdk-preview/sdk/jobs/pipelines/2e_image_classification_keras_minist_convnet/score/score.yaml"::: +:::code language="python" source="~/azureml-examples-main/sdk/jobs/pipelines/2e_image_classification_keras_minist_convnet/score/score.yaml"::: * `name` is the unique identifier of the component. Its display name is `Score Image Classification Keras`. * This component has two inputs and one output. @@ -220,17 +220,17 @@ For prep-data component and train-model component defined by python function, yo In the following code, you import `prepare_data_component()` and `keras_train_component()` function from the `prep_component.py` file under `prep` folder and `train_component` file under `train` folder respectively. 
-[!notebook-python[] (~/azureml-examples-sdk-preview/sdk/jobs/pipelines/2e_image_classification_keras_minist_convnet/image_classification_keras_minist_convnet.ipynb?name=load-from-dsl-component)]
+[!notebook-python[] (~/azureml-examples-main/sdk/jobs/pipelines/2e_image_classification_keras_minist_convnet/image_classification_keras_minist_convnet.ipynb?name=load-from-dsl-component)]

For the score component defined by YAML, you can use the `load_component()` function to load it.

-[!notebook-python[] (~/azureml-examples-sdk-preview/sdk/jobs/pipelines/2e_image_classification_keras_minist_convnet/image_classification_keras_minist_convnet.ipynb?name=load-from-yaml)]
+[!notebook-python[] (~/azureml-examples-main/sdk/jobs/pipelines/2e_image_classification_keras_minist_convnet/image_classification_keras_minist_convnet.ipynb?name=load-from-yaml)]

## Build your pipeline

Now that you've created and loaded all the components and input data, you can compose them into a pipeline:

-[!notebook-python[] (~/azureml-examples-sdk-preview/sdk/jobs/pipelines/2e_image_classification_keras_minist_convnet/image_classification_keras_minist_convnet.ipynb?name=build-pipeline)]
+[!notebook-python[] (~/azureml-examples-main/sdk/jobs/pipelines/2e_image_classification_keras_minist_convnet/image_classification_keras_minist_convnet.ipynb?name=build-pipeline)]

The pipeline has a default compute `cpu_compute_target`, which means that if you don't specify compute for a specific node, that node will run on the default compute.

@@ -261,13 +261,13 @@ We'll use `DefaultAzureCredential` to get access to workspace. `DefaultAzureCred

If `DefaultAzureCredential` doesn't work for you, see the [configure credential example](https://github.com/Azure/MachineLearningNotebooks/blob/master/configuration.ipynb) and the [azure-identity reference doc](/python/api/azure-identity/azure.identity?view=azure-python&preserve-view=true ) for more available credentials.

-[!notebook-python[] (~/azureml-examples-sdk-preview/sdk/jobs/pipelines/2e_image_classification_keras_minist_convnet/image_classification_keras_minist_convnet.ipynb?name=credential)]
+[!notebook-python[] (~/azureml-examples-main/sdk/jobs/pipelines/2e_image_classification_keras_minist_convnet/image_classification_keras_minist_convnet.ipynb?name=credential)]

#### Get a handle to a workspace with compute

Create an `MLClient` object to manage Azure Machine Learning services.

-[!notebook-python[] (~/azureml-examples-sdk-preview/sdk/jobs/pipelines/2e_image_classification_keras_minist_convnet/image_classification_keras_minist_convnet.ipynb?name=workspace)]
+[!notebook-python[] (~/azureml-examples-main/sdk/jobs/pipelines/2e_image_classification_keras_minist_convnet/image_classification_keras_minist_convnet.ipynb?name=workspace)]

> [!IMPORTANT]
> This code snippet expects the workspace configuration json file to be saved in the current directory or its parent. For more information on creating a workspace, see [Create and manage Azure Machine Learning workspaces](how-to-manage-workspace.md). For more information on saving the configuration to file, see [Create a workspace configuration file](how-to-configure-environment.md#workspace).

@@ -276,7 +276,7 @@ Create a `MLClient` object to manage Azure Machine Learning services.

Now that you have a handle to your workspace, you can submit your pipeline job.
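In outline, the submission shown in the next snippet amounts to something like this (a sketch; `pipeline_job` is the pipeline object composed above):

```python
# Submit the pipeline job under the "pipeline_samples" experiment.
pipeline_job = ml_client.jobs.create_or_update(
    pipeline_job, experiment_name="pipeline_samples"
)
```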
-[!notebook-python[] (~/azureml-examples-sdk-preview/sdk/jobs/pipelines/2e_image_classification_keras_minist_convnet/image_classification_keras_minist_convnet.ipynb?name=submit-pipeline)]
+[!notebook-python[] (~/azureml-examples-main/sdk/jobs/pipelines/2e_image_classification_keras_minist_convnet/image_classification_keras_minist_convnet.ipynb?name=submit-pipeline)]

The code above submits this image classification pipeline job to an experiment called `pipeline_samples`. The experiment is created automatically if it doesn't exist. The `pipeline_input_data` uses `fashion_ds`.

@@ -291,7 +291,7 @@ The call to `submit` the `Experiment` completes quickly, and produces output sim

You can monitor the pipeline run by opening the link, or you can block until it completes by running:

-[!notebook-python[] (~/azureml-examples-sdk-preview/sdk/jobs/pipelines/2e_image_classification_keras_minist_convnet/image_classification_keras_minist_convnet.ipynb?name=stream-pipeline)]
+[!notebook-python[] (~/azureml-examples-main/sdk/jobs/pipelines/2e_image_classification_keras_minist_convnet/image_classification_keras_minist_convnet.ipynb?name=stream-pipeline)]

> [!IMPORTANT]
> The first pipeline run takes roughly *15 minutes*. All dependencies must be downloaded, a Docker image is created, and the Python environment is provisioned and created. Running the pipeline again takes significantly less time because those resources are reused instead of created. However, total run time for the pipeline depends on the workload of your scripts and the processes that are running in each pipeline step.

@@ -308,7 +308,7 @@ You can check the logs and outputs of each component by right clicking the compo

In the previous section, you built a pipeline using three components to complete an image classification task end to end. You can also register components to your workspace so that they can be shared and reused within the workspace. The following is an example of registering the prep-data component.

-[!notebook-python[] (~/azureml-examples-sdk-preview/sdk/jobs/pipelines/2e_image_classification_keras_minist_convnet/image_classification_keras_minist_convnet.ipynb?name=register-component)]
+[!notebook-python[] (~/azureml-examples-main/sdk/jobs/pipelines/2e_image_classification_keras_minist_convnet/image_classification_keras_minist_convnet.ipynb?name=register-component)]

Using `ml_client.components.get()`, you can get a registered component by name and version. Using `ml_client.components.create_or_update()`, you can register a component previously loaded from a Python function or YAML.

diff --git a/articles/machine-learning/how-to-create-component-pipelines-cli.md b/articles/machine-learning/how-to-create-component-pipelines-cli.md
index b6ba63b1f4da..b834ffa9e84d 100644
--- a/articles/machine-learning/how-to-create-component-pipelines-cli.md
+++ b/articles/machine-learning/how-to-create-component-pipelines-cli.md
@@ -7,7 +7,7 @@ ms.service: machine-learning
ms.subservice: core
author: xiaoharper
ms.author: zhanxia
-ms.date: 05/10/2022
+ms.date: 05/26/2022
ms.topic: how-to
ms.custom: devplatv2, devx-track-azurecli, event-tier1-build-2022
ms.devlang: azurecli, cliv2
@@ -92,7 +92,7 @@ Open the `services.Studio.endpoint` URL you'll see a graph visualization of the

Let's take a look at the pipeline definition in the *3b_pipeline_with_data/pipeline.yml* file.
-:::code language="yaml" source="~/azureml-examples-sdk-preview/cli/jobs/pipelines-with-components/basics/3b_pipeline_with_data/pipeline.yml"::: +:::code language="yaml" source="~/azureml-examples-main/cli/jobs/pipelines-with-components/basics/3b_pipeline_with_data/pipeline.yml"::: Below table describes the most common used fields of pipeline YAML schema. See [full pipeline YAML schema here](reference-yaml-job-pipeline.md). @@ -125,7 +125,7 @@ One common scenario is to read and write data in your pipeline. In AuzreML, we u Now let's look at the *componentA.yml* as an example to understand component definition YAML. -:::code language="yaml" source="~/azureml-examples-sdk-preview/cli/jobs/pipelines-with-components/basics/3b_pipeline_with_data/componentA.yml"::: +:::code language="yaml" source="~/azureml-examples-main/cli/jobs/pipelines-with-components/basics/3b_pipeline_with_data/componentA.yml"::: The most common used schema of the component YAML is described in below table. See [full component YAML schema here](reference-yaml-component-command.md). @@ -193,7 +193,7 @@ Under **Jobs** tab, you'll see the history of all jobs that use this component. Let's use `1b_e2e_registered_components` to demo how to use registered component in pipeline YAML. Navigate to `1b_e2e_registered_components` directory, open the `pipeline.yml` file. The keys and values in the `inputs` and `outputs` fields are similar to those already discussed. The only significant difference is the value of the `component` field in the `jobs..component` entries. The `component` value is of the form `azureml::`. The `train-job` definition, for instance, specifies the latest version of the registered component `my_train` should be used: -:::code language="yaml" source="~/azureml-examples-sdk-preview/cli/jobs/pipelines-with-components/basics/1b_e2e_registered_components/pipeline.yml" range="24-36" highlight="4"::: +:::code language="yaml" source="~/azureml-examples-main/cli/jobs/pipelines-with-components/basics/1b_e2e_registered_components/pipeline.yml" range="24-36" highlight="4"::: ### Manage components diff --git a/articles/machine-learning/how-to-deploy-local.md b/articles/machine-learning/how-to-deploy-local.md index 15b615c485e0..014e5764f281 100644 --- a/articles/machine-learning/how-to-deploy-local.md +++ b/articles/machine-learning/how-to-deploy-local.md @@ -245,7 +245,7 @@ model = Model.register(model_path="sklearn_regression_model.pkl", You can then find your newly registered model on the Azure Machine Learning **Model** tab: -:::image type="content" source="media/how-to-deploy-local/registered-model.png" alt-text="Screenshot of Azure Machine Learning Model tab, showing an uploaded model."::: +:::image type="content" source="media/how-to-deploy-local/registered-model.png" alt-text="Screenshot of Azure Machine Learning Model tab, showing an uploaded model." lightbox="media/how-to-deploy-local/registered-model.png"::: For more information on uploading and updating models and environments, see [Register model and deploy locally with advanced usages](https://github.com/Azure/MachineLearningNotebooks/blob/master/how-to-use-azureml/deployment/deploy-to-local/register-model-deploy-local-advanced.ipynb). 
diff --git a/articles/machine-learning/how-to-deploy-managed-online-endpoint-sdk-v2.md b/articles/machine-learning/how-to-deploy-managed-online-endpoint-sdk-v2.md new file mode 100644 index 000000000000..d9cae8b860be --- /dev/null +++ b/articles/machine-learning/how-to-deploy-managed-online-endpoint-sdk-v2.md @@ -0,0 +1,313 @@ +--- +title: Deploy machine learning models to managed online endpoint using Python SDK v2 (preview). +titleSuffix: Azure Machine Learning +description: Learn to deploy your machine learning model to Azure using Python SDK v2 (preview). +services: machine-learning +ms.service: machine-learning +ms.subservice: mlops +ms.author: ssambare +ms.reviewer: larryfr +author: shivanissambare +ms.date: 05/25/2022 +ms.topic: how-to +ms.custom: how-to, devplatv2, sdkv2, deployment +--- + +# Deploy and score a machine learning model with managed online endpoint using Python SDK v2 (preview) + +[!INCLUDE [sdk v2](../../includes/machine-learning-sdk-v2.md)] + +> [!IMPORTANT] +> SDK v2 is currently in public preview. +> The preview version is provided without a service level agreement, and it's not recommended for production workloads. Certain features might not be supported or might have constrained capabilities. +> For more information, see [Supplemental Terms of Use for Microsoft Azure Previews](https://azure.microsoft.com/support/legal/preview-supplemental-terms/). + +In this article, you learn how to deploy your machine learning model to managed online endpoint and get predictions. You'll begin by deploying a model on your local machine to debug any errors, and then you'll deploy and test it in Azure. + +## Prerequisites + +* If you don't have an Azure subscription, create a free account before you begin. Try the [free or paid version of Azure Machine Learning](https://azure.microsoft.com/free/) today. +* The [Azure Machine Learning SDK v2 for Python](/python/api/overview/azure/ml/installv2). +* You must have an Azure resource group, and you (or the service principal you use) must have Contributor access to it. +* You must have an Azure Machine Learning workspace. +* To deploy locally, you must install [Docker Engine](https://docs.docker.com/engine/) on your local computer. We highly recommend this option, so it's easier to debug issues. + +### Clone examples repository + +To run the training examples, first clone the examples repository and change into the `sdk` directory: + +```bash +git clone --depth 1 https://github.com/Azure/azureml-examples +cd azureml-examples/sdk +``` + +> [!TIP] +> Use `--depth 1` to clone only the latest commit to the repository, which reduces time to complete the operation. + +## Connect to Azure Machine Learning workspace + +The [workspace](concept-workspace.md) is the top-level resource for Azure Machine Learning, providing a centralized place to work with all the artifacts you create when you use Azure Machine Learning. In this section, we'll connect to the workspace in which you'll perform deployment tasks. + +1. Import the required libraries: + + ```python + # import required libraries + from azure.ai.ml import MLClient + from azure.ai.ml.entities import ( + ManagedOnlineEndpoint, + ManagedOnlineDeployment, + Model, + Environment, + CodeConfiguration, + ) + from azure.identity import DefaultAzureCredential + ``` + +1. Configure workspace details and get a handle to the workspace: + + To connect to a workspace, we need identifier parameters - a subscription, resource group and workspace name. 
We'll use these details in the `MLClient` from `azure.ai.ml` to get a handle to the required Azure Machine Learning workspace. This example uses the [default Azure authentication](/python/api/azure-identity/azure.identity.defaultazurecredential). + + ```python + # enter details of your AML workspace + subscription_id = "" + resource_group = "" + workspace = "" + ``` + + ```python + # get a handle to the workspace + ml_client = MLClient( + DefaultAzureCredential(), subscription_id, resource_group, workspace + ) + ``` + +## Create local endpoint and deployment + +> [!NOTE] +> To deploy locally, [Docker Engine](https://docs.docker.com/engine/install/) must be installed. +> Docker Engine must be running. Docker Engine typically starts when the computer starts. If it doesn't, you can [troubleshoot Docker Engine](https://docs.docker.com/config/daemon/#start-the-daemon-manually). + +1. Create local endpoint: + + The goal of a local endpoint deployment is to validate and debug your code and configuration before you deploy to Azure. Local deployment has the following limitations: + + * Local endpoints don't support traffic rules, authentication, or probe settings. + * Local endpoints support only one deployment per endpoint. + + ```python + # Creating a local endpoint + import datetime + + local_endpoint_name = "local-" + datetime.datetime.now().strftime("%m%d%H%M%f") + + # create an online endpoint + endpoint = ManagedOnlineEndpoint( + name=local_endpoint_name, description="this is a sample local endpoint" + ) + ``` + + ```python + ml_client.online_endpoints.begin_create_or_update(endpoint, local=True) + ``` + +1. Create local deployment: + + The example contains all the files needed to deploy a model on an online endpoint. To deploy a model, you must have: + + * Model files (or the name and version of a model that's already registered in your workspace). In the example, we have a scikit-learn model that does regression. + * The code that's required to score the model. In this case, we have a score.py file. + * An environment in which your model runs. As you'll see, the environment might be a Docker image with Conda dependencies, or it might be a Dockerfile. + * Settings to specify the instance type and scaling capacity. + + **Key aspects of deployment** + * `name` - Name of the deployment. + * `endpoint_name` - Name of the endpoint to create the deployment under. + * `model` - The model to use for the deployment. This value can be either a reference to an existing versioned model in the workspace or an inline model specification. + * `environment` - The environment to use for the deployment. This value can be either a reference to an existing versioned environment in the workspace or an inline environment specification. + * `code_configuration` - the configuration for the source code and scoring script + * `path`- Path to the source code directory for scoring the model + * `scoring_script` - Relative path to the scoring file in the source code directory + * `instance_type` - The VM size to use for the deployment. For the list of supported sizes, see [Managed online endpoints SKU list](reference-managed-online-endpoints-vm-sku-list.md). 
+ * `instance_count` - The number of instances to use for the deployment + + ```python + model = Model(path="../model-1/model/sklearn_regression_model.pkl") + env = Environment( + conda_file="../model-1/environment/conda.yml", + image="mcr.microsoft.com/azureml/openmpi3.1.2-ubuntu18.04:20210727.v1", + ) + + blue_deployment = ManagedOnlineDeployment( + name="blue", + endpoint_name=local_endpoint_name, + model=model, + environment=env, + code_configuration=CodeConfiguration( + code="../model-1/onlinescoring", scoring_script="score.py" + ), + instance_type="Standard_F2s_v2", + instance_count=1, + ) + ``` + + ```python + ml_client.online_deployments.begin_create_or_update( + deployment=blue_deployment, local=True + ) + ``` + +## Verify the local deployment succeeded + +1. Check the status to see whether the model was deployed without error: + + ```python + ml_client.online_endpoints.get(name=local_endpoint_name, local=True) + ``` + +1. Get logs: + + ```python + ml_client.online_deployments.get_logs( + name="blue", endpoint_name=local_endpoint_name, local=True, lines=50 + ) + ``` + +## Invoke the local endpoint + +Invoke the endpoint to score the model by using the convenience command invoke and passing query parameters that are stored in a JSON file + +```python +ml_client.online_endpoints.invoke( + endpoint_name=local_endpoint_name, + request_file="../model-1/sample-request.json", + local=True, +) +``` + +## Deploy your online endpoint to Azure + +Next, deploy your online endpoint to Azure. + +1. Configure online endpoint: + + > [!TIP] + > * `endpoint_name`: The name of the endpoint. It must be unique in the Azure region. For more information on the naming rules, see [managed online endpoint limits](how-to-manage-quotas.md#azure-machine-learning-managed-online-endpoints). + > * `auth_mode` : Use `key` for key-based authentication. Use `aml_token` for Azure Machine Learning token-based authentication. A `key` doesn't expire, but `aml_token` does expire. For more information on authenticating, see [Authenticate to an online endpoint](how-to-authenticate-online-endpoint.md). + > * Optionally, you can add description, tags to your endpoint. + + ```python + # Creating a unique endpoint name with current datetime to avoid conflicts + import datetime + + online_endpoint_name = "endpoint-" + datetime.datetime.now().strftime("%m%d%H%M%f") + + # create an online endpoint + endpoint = ManagedOnlineEndpoint( + name=online_endpoint_name, + description="this is a sample online endpoint", + auth_mode="key", + tags={"foo": "bar"}, + ) + ``` + +1. Create the endpoint: + + Using the `MLClient` created earlier, we'll now create the Endpoint in the workspace. This command will start the endpoint creation and return a confirmation response while the endpoint creation continues. + + ```python + ml_client.begin_create_or_update(endpoint) + ``` + +1. Configure online deployment: + + A deployment is a set of resources required for hosting the model that does the actual inferencing. We'll create a deployment for our endpoint using the `ManagedOnlineDeployment` class. 
+ + ```python + model = Model(path="../model-1/model/sklearn_regression_model.pkl") + env = Environment( + conda_file="../model-1/environment/conda.yml", + image="mcr.microsoft.com/azureml/openmpi3.1.2-ubuntu18.04:20210727.v1", + ) + + blue_deployment = ManagedOnlineDeployment( + name="blue", + endpoint_name=online_endpoint_name, + model=model, + environment=env, + code_configuration=CodeConfiguration( + code="../model-1/onlinescoring", scoring_script="score.py" + ), + instance_type="Standard_F2s_v2", + instance_count=1, + ) + ``` + +1. Create the deployment: + + Using the `MLClient` created earlier, we'll now create the deployment in the workspace. This command will start the deployment creation and return a confirmation response while the deployment creation continues. + + ```python + ml_client.begin_create_or_update(blue_deployment) + ``` + + ```python + # blue deployment takes 100 traffic + endpoint.traffic = {"blue": 100} + ml_client.begin_create_or_update(endpoint) + ``` + +## Test the endpoint with sample data + +Using the `MLClient` created earlier, we'll get a handle to the endpoint. The endpoint can be invoked using the `invoke` command with the following parameters: + +* `endpoint_name` - Name of the endpoint +* `request_file` - File with request data +* `deployment_name` - Name of the specific deployment to test in an endpoint + +We'll send a sample request using a [json](https://github.com/Azure/azureml-examples/blob/main/sdk/endpoints/online/model-1/sample-request.json) file. + +```python +# test the blue deployment with some sample data +ml_client.online_endpoints.invoke( + endpoint_name=online_endpoint_name, + deployment_name="blue", + request_file="../model-1/sample-request.json", +) +``` + +## Managing endpoints and deployments + +1. Get details of the endpoint: + + ```python + # Get the details for online endpoint + endpoint = ml_client.online_endpoints.get(name=online_endpoint_name) + + # existing traffic details + print(endpoint.traffic) + + # Get the scoring URI + print(endpoint.scoring_uri) + ``` + +1. Get the logs for the new deployment: + + Get the logs for the green deployment and verify as needed + + ```python + ml_client.online_deployments.get_logs( + name="blue", endpoint_name=online_endpoint_name, lines=50 + ) + ``` + +## Delete the endpoint + +```python +ml_client.online_endpoints.begin_delete(name=online_endpoint_name) +``` + +## Next steps + +Try these next steps to learn how to use the Azure Machine Learning SDK (v2) for Python: +* [Managed online endpoint safe rollout](how-to-safely-rollout-managed-endpoints-sdk-v2.md) +* Explore online endpoint samples - [https://github.com/Azure/azureml-examples/tree/main/sdk/endpoints](https://github.com/Azure/azureml-examples/tree/main/sdk/endpoints) \ No newline at end of file diff --git a/articles/machine-learning/how-to-log-pipelines-application-insights.md b/articles/machine-learning/how-to-log-pipelines-application-insights.md index 4ff8313d92da..d5c4ae57af09 100644 --- a/articles/machine-learning/how-to-log-pipelines-application-insights.md +++ b/articles/machine-learning/how-to-log-pipelines-application-insights.md @@ -159,6 +159,6 @@ Some of the queries below use 'customDimensions.Level'. These severity levels co ## Next Steps -Once you have logs in your Application Insights instance, they can be used to set [Azure Monitor alerts](../azure-monitor/alerts/alerts-overview.md#what-you-can-alert-on) based on query results. 
+Once you have logs in your Application Insights instance, they can be used to set [Azure Monitor alerts](../azure-monitor/alerts/alerts-overview.md) based on query results. You can also add results from queries to an [Azure Dashboard](../azure-monitor/app/tutorial-app-dashboards.md#add-logs-query) for additional insights. diff --git a/articles/machine-learning/how-to-network-security-overview.md b/articles/machine-learning/how-to-network-security-overview.md index d408cf67cf6f..068f5b480e32 100644 --- a/articles/machine-learning/how-to-network-security-overview.md +++ b/articles/machine-learning/how-to-network-security-overview.md @@ -101,7 +101,7 @@ Use the following steps to secure your workspace and associated resources. These | Service | Endpoint information | Allow trusted information | | ----- | ----- | ----- | | __Azure Key Vault__| [Service endpoint](../key-vault/general/overview-vnet-service-endpoints.md)
    [Private endpoint](../key-vault/general/private-link-service.md) | [Allow trusted Microsoft services to bypass this firewall](how-to-secure-workspace-vnet.md#secure-azure-key-vault) | - | __Azure Storage Account__ | [Service and private endpoint](how-to-secure-workspace-vnet.md?tabs=se#secure-azure-storage-accounts)
    [Private endpoint](how-to-secure-workspace-vnet.md?tabs=pe#secure-azure-storage-accounts) | [Grant access from Azure resource instances](../storage/common/storage-network-security.md#grant-access-from-azure-resource-instances-preview)
    **or**
    [Grant access to trusted Azure services](../storage/common/storage-network-security.md#grant-access-to-trusted-azure-services) | + | __Azure Storage Account__ | [Service and private endpoint](how-to-secure-workspace-vnet.md?tabs=se#secure-azure-storage-accounts)
    [Private endpoint](how-to-secure-workspace-vnet.md?tabs=pe#secure-azure-storage-accounts) | [Grant access from Azure resource instances](../storage/common/storage-network-security.md#grant-access-from-azure-resource-instances)
    **or**
    [Grant access to trusted Azure services](../storage/common/storage-network-security.md#grant-access-to-trusted-azure-services) | | __Azure Container Registry__ | [Private endpoint](../container-registry/container-registry-private-link.md) | [Allow trusted services](../container-registry/allow-access-trusted-services.md) | diff --git a/articles/machine-learning/how-to-prepare-datasets-for-automl-images.md b/articles/machine-learning/how-to-prepare-datasets-for-automl-images.md index 4950c7185635..d0d52dbac7fa 100644 --- a/articles/machine-learning/how-to-prepare-datasets-for-automl-images.md +++ b/articles/machine-learning/how-to-prepare-datasets-for-automl-images.md @@ -8,7 +8,7 @@ ms.service: machine-learning ms.subservice: automl ms.topic: how-to ms.custom: template-how-to, sdkv2, event-tier1-build-2022 -ms.date: 04/15/2022 +ms.date: 05/26/2022 --- # Prepare data for computer vision tasks with automated machine learning (preview) @@ -70,7 +70,7 @@ az ml data create -f [PATH_TO_YML_FILE] --workspace-name [YOUR_AZURE_WORKSPACE] ``` # [Python SDK v2 (preview)](#tab/SDK-v2) -[!Notebook-python[] (~/azureml-examples-sdk-preview/sdk/jobs/automl-standalone-jobs/automl-image-object-detection-task-fridge-items/automl-image-object-detection-task-fridge-items.ipynb?name=upload-data)] +[!Notebook-python[] (~/azureml-examples-main/sdk/jobs/automl-standalone-jobs/automl-image-object-detection-task-fridge-items/automl-image-object-detection-task-fridge-items.ipynb?name=upload-data)] --- Next, you will need to get the label annotations in JSONL format. The schema of labeled data depends on the computer vision task at hand. Refer to [schemas for JSONL files for AutoML computer vision experiments](reference-automl-images-schema.md) to learn more about the required JSONL schema for each task type. @@ -81,7 +81,7 @@ If your training data is in a different format (like, pascal VOC or COCO), [help Once you have your labeled data in JSONL format, you can use it to create `MLTable` as shown below. MLtable packages your data into a consumable object for training. -:::code language="yaml" source="~/azureml-examples-sdk-preview/sdk/jobs/automl-standalone-jobs/automl-image-object-detection-task-fridge-items/data/training-mltable-folder/MLTable"::: +:::code language="yaml" source="~/azureml-examples-main/sdk/jobs/automl-standalone-jobs/automl-image-object-detection-task-fridge-items/data/training-mltable-folder/MLTable"::: You can then pass in the `MLTable` as a data input for your AutoML training job. diff --git a/articles/machine-learning/how-to-read-write-data-v2.md b/articles/machine-learning/how-to-read-write-data-v2.md index b07852f6823c..87ef6b535819 100644 --- a/articles/machine-learning/how-to-read-write-data-v2.md +++ b/articles/machine-learning/how-to-read-write-data-v2.md @@ -9,7 +9,7 @@ ms.topic: how-to ms.author: yogipandey author: ynpandey ms.reviewer: ssalgadodev -ms.date: 04/15/2022 +ms.date: 05/26/2022 ms.custom: devx-track-python, devplatv2, sdkv2, cliv2, event-tier1-build-2022 #Customer intent: As an experienced Python developer, I need to read in my data to make it available to a remote compute to train my machine learning models. 
--- @@ -331,7 +331,7 @@ The following YAML file demonstrates how to use the output data from one compone [!INCLUDE [cli v2](../../includes/machine-learning-cli-v2.md)] -:::code language="yaml" source="~/azureml-examples-sdk-preview/cli/jobs/pipelines-with-components/basics/3b_pipeline_with_data/pipeline.yml"::: +:::code language="yaml" source="~/azureml-examples-main/cli/jobs/pipelines-with-components/basics/3b_pipeline_with_data/pipeline.yml"::: ## Python SDK v2 (preview) @@ -341,7 +341,7 @@ The following example defines a pipeline containing three nodes and moves data b * `train_node` that trains a CNN model with Keras using the training data, `mnist_train.csv` . * `score_node` that scores the model using test data, `mnist_test.csv`. -[!notebook-python[] (~/azureml-examples-sdk-preview/sdk/jobs/pipelines/2e_image_classification_keras_minist_convnet/image_classification_keras_minist_convnet.ipynb?name=build-pipeline)] +[!notebook-python[] (~/azureml-examples-main/sdk/jobs/pipelines/2e_image_classification_keras_minist_convnet/image_classification_keras_minist_convnet.ipynb?name=build-pipeline)] ## Next steps * [Install and set up Python SDK v2 (preview)](https://aka.ms/sdk-v2-install) diff --git a/articles/machine-learning/how-to-safely-rollout-managed-endpoints-sdk-v2.md b/articles/machine-learning/how-to-safely-rollout-managed-endpoints-sdk-v2.md new file mode 100644 index 000000000000..4d57da05423a --- /dev/null +++ b/articles/machine-learning/how-to-safely-rollout-managed-endpoints-sdk-v2.md @@ -0,0 +1,305 @@ +--- +title: Safe rollout for managed online endpoints using Python SDK v2 (preview). +titleSuffix: Azure Machine Learning +description: Safe rollout for online endpoints using Python SDK v2 (preview). +services: machine-learning +ms.service: machine-learning +ms.subservice: mlops +ms.author: ssambare +ms.reviewer: larryfr +author: shivanissambare +ms.date: 05/25/2022 +ms.topic: how-to +ms.custom: how-to, devplatv2, sdkv2, deployment +--- + +# Safe rollout for managed online endpoints using Python SDK v2 (preview) + +[!INCLUDE [sdk v2](../../includes/machine-learning-sdk-v2.md)] + +> [!IMPORTANT] +> SDK v2 is currently in public preview. +> The preview version is provided without a service level agreement, and it's not recommended for production workloads. Certain features might not be supported or might have constrained capabilities. +> For more information, see [Supplemental Terms of Use for Microsoft Azure Previews](https://azure.microsoft.com/support/legal/preview-supplemental-terms/). + +In this article, you learn how to deploy a new version of the model without causing any disruption. With blue-green deployment or safe rollout, an approach in which a new version of a web service is introduced to production by rolling out the change to a small subset of users/requests before rolling it out completely. This article assumes you're using online endpoints; for more information, see [Azure Machine Learning endpoints](concept-endpoints.md). + +In this article, you'll learn to: + +* Deploy a new online endpoint called "blue" that serves version 1 of the model. +* Scale this deployment so that it can handle more requests. +* Deploy version 2 of the model to an endpoint called "green" that accepts no live traffic. +* Test the green deployment in isolation. +* Send 10% of live traffic to the green deployment. +* Fully cut-over all live traffic to the green deployment. +* Delete the now-unused v1 blue deployment. 
+ +## Prerequisites + +* If you don't have an Azure subscription, create a free account before you begin. Try the [free or paid version of Azure Machine Learning](https://azure.microsoft.com/free/) today. +* The [Azure Machine Learning SDK v2 for Python](/python/api/overview/azure/ml/installv2). +* You must have an Azure resource group, and you (or the service principal you use) must have Contributor access to it. +* You must have an Azure Machine Learning workspace. +* To deploy locally, you must install [Docker Engine](https://docs.docker.com/engine/) on your local computer. We highly recommend this option, so it's easier to debug issues. + +### Clone examples repository + +To run the training examples, first clone the examples repository and change into the `sdk` directory: + +```bash +git clone --depth 1 https://github.com/Azure/azureml-examples +cd azureml-examples/sdk +``` + +> [!TIP] +> Use `--depth 1` to clone only the latest commit to the repository, which reduces time to complete the operation. + +## Connect to Azure Machine Learning workspace + +The [workspace](concept-workspace.md) is the top-level resource for Azure Machine Learning, providing a centralized place to work with all the artifacts you create when you use Azure Machine Learning. In this section, we'll connect to the workspace in which you'll perform deployment tasks. + +1. Import the required libraries: + + ```python + # import required libraries + from azure.ai.ml import MLClient + from azure.ai.ml.entities import ( + ManagedOnlineEndpoint, + ManagedOnlineDeployment, + Model, + Environment, + CodeConfiguration, + ) + from azure.identity import DefaultAzureCredential + ``` + +1. Configure workspace details and get a handle to the workspace: + + To connect to a workspace, we need identifier parameters - a subscription, resource group and workspace name. We'll use these details in the `MLClient` from `azure.ai.ml` to get a handle to the required Azure Machine Learning workspace. This example uses the [default Azure authentication](/python/api/azure-identity/azure.identity.defaultazurecredential). + + ```python + # enter details of your AML workspace + subscription_id = "" + resource_group = "" + workspace = "" + ``` + + ```python + # get a handle to the workspace + ml_client = MLClient( + DefaultAzureCredential(), subscription_id, resource_group, workspace + ) + ``` + +## Create online endpoint + +Online endpoints are endpoints that are used for online (real-time) inferencing. Online endpoints contain deployments that are ready to receive data from clients and can send responses back in real time. + +To create an online endpoint, we'll use `ManagedOnlineEndpoint`. This class allows user to configure the following key aspects: + +* `name` - Name of the endpoint. Needs to be unique at the Azure region level +* `auth_mode` - The authentication method for the endpoint. Key-based authentication and Azure ML token-based authentication are supported. Key-based authentication doesn't expire but Azure ML token-based authentication does. Possible values are `key` or `aml_token`. +* `identity`- The managed identity configuration for accessing Azure resources for endpoint provisioning and inference. + * `type`- The type of managed identity. Azure Machine Learning supports `system_assigned` or `user_assigned` identity. + * `user_assigned_identities` - List (array) of fully qualified resource IDs of the user-assigned identities. This property is required if `identity.type` is user_assigned. 
+* `description`- Description of the endpoint. + +1. Configure the endpoint: + + ```python + # Creating a unique endpoint name with current datetime to avoid conflicts + import datetime + + online_endpoint_name = "endpoint-" + datetime.datetime.now().strftime("%m%d%H%M%f") + + # create an online endpoint + endpoint = ManagedOnlineEndpoint( + name=online_endpoint_name, + description="this is a sample online endpoint", + auth_mode="key", + tags={"foo": "bar"}, + ) + ``` + +1. Create the endpoint: + + Using the `MLClient` created earlier, we'll now create the Endpoint in the workspace. This command will start the endpoint creation and return a confirmation response while the endpoint creation continues. + + ```python + ml_client.begin_create_or_update(endpoint) + ``` + +## Create the 'blue' deployment + +A deployment is a set of resources required for hosting the model that does the actual inferencing. We'll create a deployment for our endpoint using the `ManagedOnlineDeployment` class. This class allows user to configure the following key aspects. + +**Key aspects of deployment** +* `name` - Name of the deployment. +* `endpoint_name` - Name of the endpoint to create the deployment under. +* `model` - The model to use for the deployment. This value can be either a reference to an existing versioned model in the workspace or an inline model specification. +* `environment` - The environment to use for the deployment. This value can be either a reference to an existing versioned environment in the workspace or an inline environment specification. +* `code_configuration` - the configuration for the source code and scoring script + * `path`- Path to the source code directory for scoring the model + * `scoring_script` - Relative path to the scoring file in the source code directory +* `instance_type` - The VM size to use for the deployment. For the list of supported sizes, see [Managed online endpoints SKU list](reference-managed-online-endpoints-vm-sku-list.md). +* `instance_count` - The number of instances to use for the deployment + +1. Configure blue deployment: + + ```python + # create blue deployment + model = Model(path="../model-1/model/sklearn_regression_model.pkl") + env = Environment( + conda_file="../model-1/environment/conda.yml", + image="mcr.microsoft.com/azureml/openmpi3.1.2-ubuntu18.04:20210727.v1", + ) + + blue_deployment = ManagedOnlineDeployment( + name="blue", + endpoint_name=online_endpoint_name, + model=model, + environment=env, + code_configuration=CodeConfiguration( + code="../model-1/onlinescoring", scoring_script="score.py" + ), + instance_type="Standard_F2s_v2", + instance_count=1, + ) + ``` + +1. Create the deployment: + + Using the `MLClient` created earlier, we'll now create the deployment in the workspace. This command will start the deployment creation and return a confirmation response while the deployment creation continues. + + ```python + ml_client.begin_create_or_update(blue_deployment) + ``` + + ```python + # blue deployment takes 100 traffic + endpoint.traffic = {"blue": 100} + ml_client.begin_create_or_update(endpoint) + ``` + +## Test the endpoint with sample data + +Using the `MLClient` created earlier, we'll get a handle to the endpoint. 
The endpoint can be invoked using the `invoke` command with the following parameters: + +* `endpoint_name` - Name of the endpoint +* `request_file` - File with request data +* `deployment_name` - Name of the specific deployment to test in an endpoint + +We'll send a sample request using a [json](https://github.com/Azure/azureml-examples/blob/main/sdk/endpoints/online/model-1/sample-request.json) file. + +```python +# test the blue deployment with some sample data +ml_client.online_endpoints.invoke( + endpoint_name=online_endpoint_name, + deployment_name="blue", + request_file="../model-1/sample-request.json", +) +``` + +## Scale the deployment + +Using the `MLClient` created earlier, we'll get a handle to the deployment. The deployment can be scaled by increasing or decreasing the `instance_count`. + +```python +# scale the deployment +blue_deployment = ml_client.online_deployments.get( + name="blue", endpoint_name=online_endpoint_name +) +blue_deployment.instance_count = 2 +ml_client.online_deployments.begin_create_or_update(blue_deployment) +``` + +## Get endpoint details + +```python +# Get the details for online endpoint +endpoint = ml_client.online_endpoints.get(name=online_endpoint_name) + +# existing traffic details +print(endpoint.traffic) + +# Get the scoring URI +print(endpoint.scoring_uri) +``` + +## Deploy a new model, but send no traffic yet + +Create a new deployment named green: + +```python +# create green deployment +model2 = Model(path="../model-2/model/sklearn_regression_model.pkl") +env2 = Environment( + conda_file="../model-2/environment/conda.yml", + image="mcr.microsoft.com/azureml/openmpi3.1.2-ubuntu18.04:20210727.v1", +) + +green_deployment = ManagedOnlineDeployment( + name="green", + endpoint_name=online_endpoint_name, + model=model2, + environment=env2, + code_configuration=CodeConfiguration( + code="../model-2/onlinescoring", scoring_script="score.py" + ), + instance_type="Standard_F2s_v2", + instance_count=1, +) +``` + +```python +# use MLClient to create green deployment +ml_client.begin_create_or_update(green_deployment) +``` + +## Test the 'green' deployment + +Though green has 0% of traffic allocated, you can still invoke the endpoint and deployment with [json](https://github.com/Azure/azureml-examples/blob/main/sdk/endpoints/online/model-2/sample-request.json) file. + +```python +ml_client.online_endpoints.invoke( + endpoint_name=online_endpoint_name, + deployment_name="green", + request_file="../model-2/sample-request.json", +) +``` + +1. Test the new deployment with a small percentage of live traffic: + + Once you've tested your green deployment, allocate a small percentage of traffic to it: + + ```python + endpoint.traffic = {"blue": 90, "green": 10} + ml_client.begin_create_or_update(endpoint) + ``` + + Now, your green deployment will receive 10% of requests. + +1. Send all traffic to your new deployment: + + Once you're satisfied that your green deployment is fully satisfactory, switch all traffic to it. + + ```python + endpoint.traffic = {"blue": 0, "green": 100} + ml_client.begin_create_or_update(endpoint) + ``` + +1. 
Remove the old deployment: + + ```python + ml_client.online_deployments.delete(name="blue", endpoint_name=online_endpoint_name) + ``` + +## Delete endpoint + +```python +ml_client.online_endpoints.begin_delete(name=online_endpoint_name) +``` + +## Next steps + +* Explore online endpoint samples - [https://github.com/Azure/azureml-examples/tree/main/sdk/endpoints](https://github.com/Azure/azureml-examples/tree/main/sdk/endpoints) \ No newline at end of file diff --git a/articles/machine-learning/how-to-secure-online-endpoint.md b/articles/machine-learning/how-to-secure-online-endpoint.md index caaa24c308a1..a21c59e7308f 100644 --- a/articles/machine-learning/how-to-secure-online-endpoint.md +++ b/articles/machine-learning/how-to-secure-online-endpoint.md @@ -9,13 +9,13 @@ ms.topic: how-to ms.reviewer: larryfr ms.author: seramasu author: rsethur -ms.date: 04/22/2022 +ms.date: 05/26/2022 ms.custom: event-tier1-build-2022 --- # Use network isolation with managed online endpoints (preview) -When deploying a machine learning model to a managed online endpoint, you can secure communication with the online endpoint by using [private endpoints](/azure/private-link/private-endpoint-overview). Using a private endpoint with online endpoints is currently a preview feature. +When deploying a machine learning model to a managed online endpoint, you can secure communication with the online endpoint by using [private endpoints](../private-link/private-endpoint-overview.md). Using a private endpoint with online endpoints is currently a preview feature. [!INCLUDE [preview disclaimer](../../includes/machine-learning-preview-generic-disclaimer.md)] @@ -35,7 +35,7 @@ The following diagram shows how communications flow through private endpoints to * You must have an Azure Machine Learning workspace, and the workspace must use a private endpoint. If you don't have one, the steps in this article create an example workspace, VNet, and VM. For more information, see [Configure a private endpoint for Azure Machine Learning workspace](how-to-configure-private-link.md). -* The Azure Container Registry for your workspace must be configured for __Premium__ tier. For more information, see [Azure Container Registry service tiers](/azure/container-registry/container-registry-skus). +* The Azure Container Registry for your workspace must be configured for __Premium__ tier. For more information, see [Azure Container Registry service tiers](../container-registry/container-registry-skus.md). * The Azure Container Registry and Azure Storage Account must be in the same Azure Resource Group as the workspace. @@ -49,6 +49,7 @@ The following diagram shows how communications flow through private endpoints to ## Limitations +* The `v1_legacy_mode` flag must be disabled (false) on your Azure Machine Learning workspace. If this flag is enabled, you won't be able to create a managed online endpoint. For more information, see [Network isolation with v2 API](how-to-configure-network-isolation-with-v2.md). * If your Azure Machine Learning workspace has a private endpoint that was created before May 24, 2022, you must recreate the workspace's private endpoint before configuring your online endpoints to use a private endpoint. For more information on creating a private endpoint for your workspace, see [How to configure a private endpoint for Azure Machine Learning workspace](how-to-configure-private-link.md). * Secure outbound communication creates three private endpoints per deployment. 
One to Azure Blob storage, one to Azure Container Registry, and one to your workspace. @@ -127,9 +128,9 @@ The following diagram shows the overall architecture of this example: To create the resources, use the following Azure CLI commands. Replace `` with a unique suffix for the resources that are created. -:::code language="azurecli" source="~/azureml-examples-online-endpoint-vnet/setup-repo/azure-github.sh" id="managed_vnet_workspace_suffix"::: +:::code language="azurecli" source="~/azureml-examples-main/setup-repo/azure-github.sh" id="managed_vnet_workspace_suffix"::: -:::code language="azurecli" source="~/azureml-examples-online-endpoint-vnet/setup-repo/azure-github.sh" id="managed_vnet_workspace_create"::: +:::code language="azurecli" source="~/azureml-examples-main/setup-repo/azure-github.sh" id="managed_vnet_workspace_create"::: ### Create the virtual machine jump box @@ -171,7 +172,7 @@ When prompted, enter the password you used when creating the VM. 1. Use the following commands from the SSH session to install the CLI and Docker: - :::code language="azurecli" source="~/azureml-examples-online-endpoint-vnet/cli/endpoints/online/managed/vnet/setup_vm/scripts/vmsetup.sh" id="setup_docker_az_cli"::: + :::code language="azurecli" source="~/azureml-examples-main/cli/endpoints/online/managed/vnet/setup_vm/scripts/vmsetup.sh" id="setup_docker_az_cli"::: 1. To create the environment variables used by this example, run the following commands. Replace `` with your Azure subscription ID. Replace `` with the resource group that contains your workspace. Replace `` with the suffix you provided earlier. Replace `` with the location of your Azure workspace. Replace `` with the name to use for the endpoint. @@ -180,11 +181,11 @@ When prompted, enter the password you used when creating the VM. # [Generic model](#tab/model) - :::code language="azurecli" source="~/azureml-examples-online-endpoint-vnet/cli/deploy-moe-vnet.sh" id="set_env_vars"::: + :::code language="azurecli" source="~/azureml-examples-main/cli/deploy-moe-vnet.sh" id="set_env_vars"::: # [MLflow model](#tab/mlflow) - :::code language="azurecli" source="~/azureml-examples-online-endpoint-vnet/cli/deploy-moe-vnet-mlflow.sh" id="set_env_vars"::: + :::code language="azurecli" source="~/azureml-examples-main/cli/deploy-moe-vnet-mlflow.sh" id="set_env_vars"::: --- @@ -194,7 +195,7 @@ When prompted, enter the password you used when creating the VM. 1. To configure the defaults for the CLI, use the following commands: - :::code language="azurecli" source="~/azureml-examples-online-endpoint-vnet/cli/endpoints/online/managed/vnet/setup_vm/scripts/vmsetup.sh" id="configure_defaults"::: + :::code language="azurecli" source="~/azureml-examples-main/cli/endpoints/online/managed/vnet/setup_vm/scripts/vmsetup.sh" id="configure_defaults"::: 1. To clone the example files for the deployment, use the following command: @@ -204,7 +205,7 @@ When prompted, enter the password you used when creating the VM. 1. To build a custom docker image to use with the deployment, use the following commands: - :::code language="azurecli" source="~/azureml-examples-online-endpoint-vnet/cli/endpoints/online/managed/vnet/setup_vm/scripts/build_image.sh" id="build_image"::: + :::code language="azurecli" source="~/azureml-examples-main/cli/endpoints/online/managed/vnet/setup_vm/scripts/build_image.sh" id="build_image"::: > [!TIP] > In this example, we build the Docker image before pushing it to Azure Container Registry. 
Alternatively, you can build the image in your vnet by using an Azure Machine Learning compute cluster and environments. For more information, see [Secure Azure Machine Learning workspace](how-to-secure-workspace-vnet.md#enable-azure-container-registry-acr). @@ -216,22 +217,22 @@ When prompted, enter the password you used when creating the VM. > [!TIP] > You can test or debug the Docker image locally by using the `--local` flag when creating the deployment. For more information, see the [Deploy and debug locally](how-to-deploy-managed-online-endpoints.md#deploy-and-debug-locally-by-using-local-endpoints) article. - :::code language="azurecli" source="~/azureml-examples-online-endpoint-vnet/cli/endpoints/online/managed/vnet/setup_vm/scripts/create_moe.sh" id="create_vnet_deployment"::: + :::code language="azurecli" source="~/azureml-examples-main/cli/endpoints/online/managed/vnet/setup_vm/scripts/create_moe.sh" id="create_vnet_deployment"::: 1. To make a scoring request with the endpoint, use the following commands: - :::code language="azurecli" source="~/azureml-examples-online-endpoint-vnet/cli/endpoints/online/managed/vnet/setup_vm/scripts/score_endpoint.sh" id="check_deployment"::: + :::code language="azurecli" source="~/azureml-examples-main/cli/endpoints/online/managed/vnet/setup_vm/scripts/score_endpoint.sh" id="check_deployment"::: ### Cleanup To delete the endpoint, use the following command: -:::code language="azurecli" source="~/azureml-examples-online-endpoint-vnet/cli/deploy-moe-vnet.sh" id="delete_endpoint"::: +:::code language="azurecli" source="~/azureml-examples-main/cli/deploy-moe-vnet.sh" id="delete_endpoint"::: To delete the VM, use the following command: -:::code language="azurecli" source="~/azureml-examples-online-endpoint-vnet/cli/deploy-moe-vnet.sh" id="delete_vm"::: +:::code language="azurecli" source="~/azureml-examples-main/cli/deploy-moe-vnet.sh" id="delete_vm"::: To delete all the resources created in this article, use the following command. Replace `` with the name of the resource group used in this example: @@ -249,4 +250,4 @@ az group delete --resource-group - [How to autoscale managed online endpoints](how-to-autoscale-endpoints.md) - [View costs for an Azure Machine Learning managed online endpoint](how-to-view-online-endpoints-costs.md) - [Access Azure resources with a online endpoint and managed identity](how-to-access-resources-from-endpoints-managed-identities.md) -- [Troubleshoot online endpoints deployment](how-to-troubleshoot-online-endpoints.md) +- [Troubleshoot online endpoints deployment](how-to-troubleshoot-online-endpoints.md) \ No newline at end of file diff --git a/articles/machine-learning/how-to-secure-training-vnet.md b/articles/machine-learning/how-to-secure-training-vnet.md index c459be92e92f..9010a11d56ea 100644 --- a/articles/machine-learning/how-to-secure-training-vnet.md +++ b/articles/machine-learning/how-to-secure-training-vnet.md @@ -233,7 +233,7 @@ When the creation process finishes, you train your model by using the cluster in When you enable **No public IP**, your compute cluster doesn't use a public IP for communication with any dependencies. Instead, it communicates solely within the virtual network using Azure Private Link ecosystem and service/private endpoints, eliminating the need for a public IP entirely. No public IP removes access and discoverability of compute cluster nodes from the internet thus eliminating a significant threat vector. 
**No public IP** clusters help comply with no public IP policies many enterprises have. > [!WARNING] -> By default, you do not have public internet access from No Public IP Compute Cluster. You need to configure User Defined Routing (UDR) to reach to a public IP to access the internet. For example, you can use a public IP of your firewall, or you can use [Virtual Network NAT](/azure/virtual-network/nat-gateway/nat-overview) with a public IP. +> By default, you do not have public internet access from No Public IP Compute Cluster. You need to configure User Defined Routing (UDR) to reach to a public IP to access the internet. For example, you can use a public IP of your firewall, or you can use [Virtual Network NAT](../virtual-network/nat-gateway/nat-overview.md) with a public IP. A compute cluster with **No public IP** enabled has **no inbound communication requirements** from public internet. Specifically, neither inbound NSG rule (`BatchNodeManagement`, `AzureMachineLearning`) is required. You still need to allow inbound from source of **VirtualNetwork** and any port source, to destination of **VirtualNetwork**, and destination port of **29876, 29877** and inbound from source **AzureLoadBalancer** and any port source to destination **VirtualNetwork** and port **44224** destination. @@ -266,7 +266,7 @@ For steps on how to create a compute instance deployed in a virtual network, see When you enable **No public IP**, your compute instance doesn't use a public IP for communication with any dependencies. Instead, it communicates solely within the virtual network using Azure Private Link ecosystem and service/private endpoints, eliminating the need for a public IP entirely. No public IP removes access and discoverability of compute instance node from the internet thus eliminating a significant threat vector. Compute instances will also do packet filtering to reject any traffic from outside virtual network. **No public IP** instances are dependent on [Azure Private Link](how-to-configure-private-link.md) for Azure Machine Learning workspace. > [!WARNING] -> By default, you do not have public internet access from No Public IP Compute Instance. You need to configure User Defined Routing (UDR) to reach to a public IP to access the internet. For example, you can use a public IP of your firewall, or you can use [Virtual Network NAT](/azure/virtual-network/nat-gateway/nat-overview) with a public IP. +> By default, you do not have public internet access from No Public IP Compute Instance. You need to configure User Defined Routing (UDR) to reach to a public IP to access the internet. For example, you can use a public IP of your firewall, or you can use [Virtual Network NAT](../virtual-network/nat-gateway/nat-overview.md) with a public IP. For **outbound connections** to work, you need to set up an egress firewall such as Azure firewall with user defined routes. For instance, you can use a firewall set up with [inbound/outbound configuration](how-to-access-azureml-behind-firewall.md) and route traffic there by defining a route table on the subnet in which the compute instance is deployed. The route table entry can set up the next hop of the private IP address of the firewall with the address prefix of 0.0.0.0/0. @@ -345,4 +345,4 @@ This article is part of a series on securing an Azure Machine Learning workflow. 
* If using CLI v2 or SDK v2 - [Network isolation for managed online endpoints](how-to-secure-online-endpoint.md) * [Enable studio functionality](how-to-enable-studio-virtual-network.md) * [Use custom DNS](how-to-custom-dns.md) -* [Use a firewall](how-to-access-azureml-behind-firewall.md) +* [Use a firewall](how-to-access-azureml-behind-firewall.md) \ No newline at end of file diff --git a/articles/machine-learning/how-to-train-cli.md b/articles/machine-learning/how-to-train-cli.md index 1be08fc48769..1672a00df439 100644 --- a/articles/machine-learning/how-to-train-cli.md +++ b/articles/machine-learning/how-to-train-cli.md @@ -8,7 +8,7 @@ ms.subservice: core ms.topic: how-to author: amibp ms.author: amipatel -ms.date: 03/31/2022 +ms.date: 05/26/2022 ms.reviewer: nibaccam ms.custom: devx-track-azurecli, devplatv2, event-tier1-build-2022 --- @@ -327,11 +327,11 @@ The following example shows an AutoML configuration file for training a classifi * The training has a time out of 180 minutes * The data for training is in the folder "./training-mltable-folder". Automated ML jobs only accept data in the form of an `MLTable`. -:::code language="yaml" source="~/azureml-examples-sdk-preview/cli/jobs/basics/hello-automl/hello-automl-job-basic.yml"::: +:::code language="yaml" source="~/azureml-examples-main/cli/jobs/basics/hello-automl/hello-automl-job-basic.yml"::: That mentioned MLTable definition is what points to the training data file, in this case a local .csv file that will be uploaded automatically: -:::code language="yaml" source="~/azureml-examples-sdk-preview/cli/jobs/basics/hello-automl/training-mltable-folder/MLTable"::: +:::code language="yaml" source="~/azureml-examples-main/cli/jobs/basics/hello-automl/training-mltable-folder/MLTable"::: Finally, you can run it (create the AutoML job) with this CLI command: @@ -345,7 +345,7 @@ Or like the following if providing workspace IDs explicitly instead of using the /> az ml job create --file ./hello-automl-job-basic.yml --workspace-name [YOUR_AZURE_WORKSPACE] --resource-group [YOUR_AZURE_RESOURCE_GROUP] --subscription [YOUR_AZURE_SUBSCRIPTION] ``` -To investigate additional AutoML model training examples using other ML-tasks such as regression, time-series forecasting, image classification, object detection, NLP text-classification, etc., see the complete list of [AutoML CLI examples](https://github.com/Azure/azureml-examples/tree/sdk-preview/cli/jobs/automl-standalone-jobs). +To investigate additional AutoML model training examples using other ML-tasks such as regression, time-series forecasting, image classification, object detection, NLP text-classification, etc., see the complete list of [AutoML CLI examples](https://github.com/Azure/azureml-examples/tree/main/cli/jobs/automl-standalone-jobs). 
### Train a model with a custom script diff --git a/articles/machine-learning/how-to-train-sdk.md b/articles/machine-learning/how-to-train-sdk.md index ac9924c7874c..db5c7271d570 100644 --- a/articles/machine-learning/how-to-train-sdk.md +++ b/articles/machine-learning/how-to-train-sdk.md @@ -8,7 +8,7 @@ ms.author: balapv ms.reviewer: sgilley ms.service: machine-learning ms.subservice: core -ms.date: 05/10/2022 +ms.date: 05/26/2022 ms.topic: how-to ms.custom: sdkv2, event-tier1-build-2022 --- @@ -109,7 +109,7 @@ ml_client = MLClient(DefaultAzureCredential(), subscription_id, resource_group, You'll create a compute called `cpu-cluster` for your job, with this code: -[!notebook-python[] (~/azureml-examples-sdk-preview/sdk/jobs/configuration.ipynb?name=create-cpu-compute)] +[!notebook-python[] (~/azureml-examples-main/sdk/jobs/configuration.ipynb?name=create-cpu-compute)] ### 3. Environment to run the script @@ -130,9 +130,9 @@ You'll use a curated environment provided by Azure ML for `lightgm` called `Azur To run this script, you'll use a `command`. The command will be run by submitting it as a `job` to Azure ML. -[!notebook-python[] (~/azureml-examples-sdk-preview/sdk/jobs/single-step/lightgbm/iris/lightgbm-iris-sweep.ipynb?name=create-command)] +[!notebook-python[] (~/azureml-examples-main/sdk/jobs/single-step/lightgbm/iris/lightgbm-iris-sweep.ipynb?name=create-command)] -[!notebook-python[] (~/azureml-examples-sdk-preview/sdk/jobs/single-step/lightgbm/iris/lightgbm-iris-sweep.ipynb?name=run-command)] +[!notebook-python[] (~/azureml-examples-main/sdk/jobs/single-step/lightgbm/iris/lightgbm-iris-sweep.ipynb?name=run-command)] In the above, you configured: @@ -150,14 +150,14 @@ To perform a sweep, there needs to be input(s) against which the sweep needs to Let us improve our model by sweeping on `learning_rate` and `boosting` inputs to the script. In the previous step, you used a specific value for these parameters, but now you'll use a range or choice of values. -[!notebook-python[] (~/azureml-examples-sdk-preview/sdk/jobs/single-step/lightgbm/iris/lightgbm-iris-sweep.ipynb?name=search-space)] +[!notebook-python[] (~/azureml-examples-main/sdk/jobs/single-step/lightgbm/iris/lightgbm-iris-sweep.ipynb?name=search-space)] Now that you've defined the parameters, run the sweep -[!notebook-python[] (~/azureml-examples-sdk-preview/sdk/jobs/single-step/lightgbm/iris/lightgbm-iris-sweep.ipynb?name=configure-sweep)] +[!notebook-python[] (~/azureml-examples-main/sdk/jobs/single-step/lightgbm/iris/lightgbm-iris-sweep.ipynb?name=configure-sweep)] -[!notebook-python[] (~/azureml-examples-sdk-preview/sdk/jobs/single-step/lightgbm/iris/lightgbm-iris-sweep.ipynb?name=run-sweep)] +[!notebook-python[] (~/azureml-examples-main/sdk/jobs/single-step/lightgbm/iris/lightgbm-iris-sweep.ipynb?name=run-sweep)] As seen above, the `sweep` function allows user to configure the following key aspects: diff --git a/articles/machine-learning/how-to-use-batch-endpoint-sdk-v2.md b/articles/machine-learning/how-to-use-batch-endpoint-sdk-v2.md new file mode 100644 index 000000000000..bb32bae1483b --- /dev/null +++ b/articles/machine-learning/how-to-use-batch-endpoint-sdk-v2.md @@ -0,0 +1,225 @@ +--- +title: 'Use batch endpoints for batch scoring using Python SDK v2 (preview)' +titleSuffix: Azure Machine Learning +description: In this article, learn how to create a batch endpoint to continuously batch score large data using Python SDK v2 (preview). 
+services: machine-learning +ms.service: machine-learning +ms.subservice: mlops +ms.topic: how-to +author: shivanissambare +ms.author: ssambare +ms.reviewer: larryfr +ms.date: 05/25/2022 +ms.custom: how-to, devplatv2, sdkv2 +#Customer intent: As an ML engineer or data scientist, I want to create an endpoint to host my models for batch scoring, so that I can use the same endpoint continuously for different large datasets on-demand or on-schedule. +--- + +# Use batch endpoints for batch scoring using Python SDK v2 (preview) + +[!INCLUDE [sdk v2](../../includes/machine-learning-sdk-v2.md)] + +> [!IMPORTANT] +> SDK v2 is currently in public preview. +> The preview version is provided without a service level agreement, and it's not recommended for production workloads. Certain features might not be supported or might have constrained capabilities. +> For more information, see [Supplemental Terms of Use for Microsoft Azure Previews](https://azure.microsoft.com/support/legal/preview-supplemental-terms/). + +Learn how to use batch endpoints to do batch scoring using Python SDK v2. Batch endpoints simplify the process of hosting your models for batch scoring, so you can focus on machine learning, not infrastructure. For more information, see [What are Azure Machine Learning endpoints?](concept-endpoints.md). + +In this article, you'll learn to: + +* Connect to your Azure Machine Learning workspace from the Python SDK v2. +* Create a batch endpoint from Python SDK v2. +* Create deployments on that endpoint from Python SDK v2. +* Test a deployment with a sample request. + +## Prerequisites + +* A basic understanding of Machine Learning. +* An Azure account with an active subscription. [Create an account for free](https://azure.microsoft.com/free/). +* An Azure ML workspace with a compute cluster to run your batch scoring job. +* The [Azure Machine Learning SDK v2 for Python](/python/api/overview/azure/ml/installv2). + + +## 1. Connect to Azure Machine Learning workspace + +The [workspace](concept-workspace.md) is the top-level resource for Azure Machine Learning, providing a centralized place to work with all the artifacts you create when you use Azure Machine Learning. In this section, we'll connect to the workspace in which the job will be run. + +1. Import the required libraries: + + ```python + # import required libraries + from azure.ai.ml import MLClient, Input + from azure.ai.ml.entities import ( + BatchEndpoint, + BatchDeployment, + Model, + Environment, + BatchRetrySettings, + ) + from azure.ai.ml.entities._assets import Dataset + from azure.identity import DefaultAzureCredential + from azure.ai.ml.constants import BatchDeploymentOutputAction + ``` + +1. Configure workspace details and get a handle to the workspace: + + To connect to a workspace, we need identifier parameters - a subscription, resource group and workspace name. We'll use these details in the `MLClient` from `azure.ai.ml` to get a handle to the required Azure Machine Learning workspace. This example uses the [default Azure authentication](/python/api/azure-identity/azure.identity.defaultazurecredential). + + ```python + # enter details of your AML workspace + subscription_id = "" + resource_group = "" + workspace = "" + ``` + + ```python + # get a handle to the workspace + ml_client = MLClient( + DefaultAzureCredential(), subscription_id, resource_group, workspace + ) + ``` + +## Create batch endpoint + +Batch endpoints are endpoints that are used for batch inferencing on large volumes of data over a period of time.
 Batch endpoints receive pointers to data and run jobs asynchronously to process the data in parallel on compute clusters. Batch endpoints store outputs to a data store for further analysis. + +To create a batch endpoint, we'll use `BatchEndpoint`. This class allows the user to configure the following key aspects: + +* `name` - Name of the endpoint. Needs to be unique at the Azure region level. +* `auth_mode` - The authentication method for the endpoint. Currently only Azure Active Directory (Azure AD) token-based (`aad_token`) authentication is supported. +* `identity`- The managed identity configuration for accessing Azure resources for endpoint provisioning and inference. +* `defaults` - Default settings for the endpoint. + * `deployment_name` - Name of the deployment that will serve as the default deployment for the endpoint. +* `description`- Description of the endpoint. + +1. Configure the endpoint: + + ```python + # Creating a unique endpoint name with current datetime to avoid conflicts + import datetime + + batch_endpoint_name = "my-batch-endpoint-" + datetime.datetime.now().strftime( + "%Y%m%d%H%M" + ) + + # create a batch endpoint + endpoint = BatchEndpoint( + name=batch_endpoint_name, + description="this is a sample batch endpoint", + tags={"foo": "bar"}, + ) + ``` + +1. Create the endpoint: + + Using the `MLClient` created earlier, we'll now create the endpoint in the workspace. This command will start the endpoint creation and return a confirmation response while the endpoint creation continues. + + ```python + ml_client.begin_create_or_update(endpoint) + ``` + +## Create a deployment + +A deployment is a set of resources required for hosting the model that does the actual inferencing. We'll create a deployment for our endpoint using the `BatchDeployment` class. This class allows the user to configure the following key aspects: + +* `name` - Name of the deployment. +* `endpoint_name` - Name of the endpoint to create the deployment under. +* `model` - The model to use for the deployment. This value can be either a reference to an existing versioned model in the workspace or an inline model specification. +* `environment` - The environment to use for the deployment. This value can be either a reference to an existing versioned environment in the workspace or an inline environment specification. +* `code_path`- Path to the source code directory for scoring the model. +* `scoring_script` - Relative path to the scoring file in the source code directory. +* `compute` - Name of the compute target to execute the batch scoring jobs on. +* `instance_count`- The number of nodes to use for each batch scoring job. +* `max_concurrency_per_instance`- The maximum number of parallel scoring_script runs per instance. +* `mini_batch_size` - The number of files the code_configuration.scoring_script can process in one `run()` call. +* `retry_settings`- Retry settings for scoring each mini batch. + * `max_retries`- The maximum number of retries for a failed or timed-out mini batch (default is 3). + * `timeout`- The timeout in seconds for scoring a mini batch (default is 30). +* `output_action`- Indicates how the output should be organized in the output file. Allowed values are `append_row` or `summary_only`. Default is `append_row`. +* `output_file_name`- Name of the batch scoring output file. Default is `predictions.csv`. +* `environment_variables`- Dictionary of environment variable name-value pairs to set for each batch scoring job. +* `logging_level`- The log verbosity level.
Allowed values are `warning`, `info`, `debug`. Default is `info`. + +1. Configure the deployment: + + ```python + # create a batch deployment + model = Model(path="./mnist/model/") + env = Environment( + conda_file="./mnist/environment/conda.yml", + image="mcr.microsoft.com/azureml/openmpi3.1.2-ubuntu18.04:latest", + ) + deployment = BatchDeployment( + name="non-mlflow-deployment", + description="this is a sample non-mlflow deployment", + endpoint_name=batch_endpoint_name, + model=model, + code_path="./mnist/code/", + scoring_script="digit_identification.py", + environment=env, + compute="cpu-cluster", + instance_count=2, + max_concurrency_per_instance=2, + mini_batch_size=10, + output_action=BatchDeploymentOutputAction.APPEND_ROW, + output_file_name="predictions.csv", + retry_settings=BatchRetrySettings(max_retries=3, timeout=30), + logging_level="info", + ) + ``` + +1. Create the deployment: + + Using the `MLClient` created earlier, we'll now create the deployment in the workspace. This command will start the deployment creation and return a confirmation response while the deployment creation continues. + + ```python + ml_client.begin_create_or_update(deployment) + ``` + +## Test the endpoint with sample data + +Using the `MLClient` created earlier, we'll get a handle to the endpoint. The endpoint can be invoked using the `invoke` command with the following parameters: + +* `name` - Name of the endpoint +* `input_path` - Path where input data is present +* `deployment_name` - Name of the specific deployment to test in an endpoint + +1. Invoke the endpoint: + + ```python + # create a dataset form the folderpath + input = Input(path="https://pipelinedata.blob.core.windows.net/sampledata/mnist") + + # invoke the endpoint for batch scoring job + job = ml_client.batch_endpoints.invoke( + endpoint_name=batch_endpoint_name, + input_data=input, + deployment_name="non-mlflow-deployment", # name is required as default deployment is not set + params_override=[{"mini_batch_size": "20"}, {"compute.instance_count": "4"}], + ) + ``` + +1. Get the details of the invoked job: + + Let us get details and logs of the invoked job + + ```python + # get the details of the job + job_name = job.name + batch_job = ml_client.jobs.get(name=job_name) + print(batch_job.status) + # stream the job logs + ml_client.jobs.stream(name=job_name) + ``` + +## Clean up resources + +Delete endpoint + +```python +ml_client.batch_endpoints.begin_delete(name=batch_endpoint_name) +``` + +## Next steps + +If you encounter problems using batch endpoints, see [Troubleshooting batch endpoints](how-to-troubleshoot-batch-endpoints.md). \ No newline at end of file diff --git a/articles/machine-learning/how-to-use-data.md b/articles/machine-learning/how-to-use-data.md index 97d4b098e118..acc3e1f7aa7c 100644 --- a/articles/machine-learning/how-to-use-data.md +++ b/articles/machine-learning/how-to-use-data.md @@ -74,8 +74,6 @@ These snippets use `uri_file` and `uri_folder`. > > If you wanted to pass in just an individual file rather than the entire folder you can use the `uri_file` type. -For a complete example, see the [working_with_uris.ipynb notebook](https://github.com/azure/azureml-previews/sdk/docs/working_with_uris.ipynb). 
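For orientation, here is a minimal, hedged sketch (not the removed notebook) of how a `uri_folder` input might be wired into a command job with the `azure-ai-ml` SDK v2. The folder path `./sample-data`, the `./src` script folder, the curated environment name, and the `cpu-cluster` compute target are illustrative assumptions, not part of the original article.

```python
# Hedged sketch: pass a folder (uri_folder) as an input to a command job with SDK v2.
# The folder path, script name, environment, and compute name are illustrative assumptions.
from azure.ai.ml import MLClient, command, Input
from azure.ai.ml.constants import AssetTypes
from azure.identity import DefaultAzureCredential

ml_client = MLClient(
    DefaultAzureCredential(), "<subscription-id>", "<resource-group>", "<workspace>"
)

job = command(
    code="./src",  # assumed folder containing train.py
    command="python train.py --data ${{inputs.training_data}}",
    inputs={
        # use type=AssetTypes.URI_FILE instead to pass a single file
        "training_data": Input(type=AssetTypes.URI_FOLDER, path="./sample-data"),
    },
    environment="AzureML-sklearn-1.0-ubuntu20.04-py38-cpu@latest",  # assumed curated environment
    compute="cpu-cluster",
)

returned_job = ml_client.jobs.create_or_update(job)
print(returned_job.name)
```

Swapping `AssetTypes.URI_FOLDER` for `AssetTypes.URI_FILE` (and pointing `path` at a single file) covers the single-file case mentioned above.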
- Below are some common data access patterns that you can use in your *control-plane* code to submit a job to Azure Machine Learning: ### Use data with a training job diff --git a/articles/machine-learning/how-to-use-sweep-in-pipeline.md b/articles/machine-learning/how-to-use-sweep-in-pipeline.md index 56eaeff00006..d29a36926990 100644 --- a/articles/machine-learning/how-to-use-sweep-in-pipeline.md +++ b/articles/machine-learning/how-to-use-sweep-in-pipeline.md @@ -8,7 +8,7 @@ ms.subservice: mlops ms.topic: how-to author: xiaoharper ms.author: zhanxia -ms.date: 05/10/2022 +ms.date: 05/26/2022 ms.custom: devx-track-python, sdkv2, cliv2, event-tier1-build-2022 --- @@ -37,19 +37,19 @@ The example used in this article can be found in [azureml-example repo](https:// Assume you already have a command component defined in `train.yaml`. A two-step pipeline job (train and predict) YAML file looks like the following. -:::code language="yaml" source="~/azureml-examples-sdk-preview/cli/jobs/pipelines-with-components/pipeline_with_hyperparameter_sweep/pipeline.yml" highlight="7-48"::: +:::code language="yaml" source="~/azureml-examples-main/cli/jobs/pipelines-with-components/pipeline_with_hyperparameter_sweep/pipeline.yml" highlight="7-48"::: The `sweep_step` is the step for hyperparameter tuning. Its type needs to be `sweep`. And `trial` refers to the command component defined in `train.yaml`. From the `search_space` field we can see three hyperparameters (`c_value`, `kernel`, and `coef`) are added to the search space. After you submit this pipeline job, Azure Machine Learning will run the trial component multiple times to sweep over hyperparameters based on the search space and terminate policy you defined in `sweep_step`. Check the [sweep job YAML schema](reference-yaml-job-sweep.md) for the full schema of a sweep job. Below is the trial component definition (train.yml file). -:::code language="yaml" source="~/azureml-examples-sdk-preview/cli/jobs/pipelines-with-components/pipeline_with_hyperparameter_sweep/train.yml" highlight="11-16,23-25,60"::: +:::code language="yaml" source="~/azureml-examples-main/cli/jobs/pipelines-with-components/pipeline_with_hyperparameter_sweep/train.yml" highlight="11-16,23-25,60"::: The hyperparameters added to the search space in pipeline.yml need to be inputs for the trial component. The source code of the trial component is under the `./train-src` folder. In this example, it's a single `train.py` file. This is the code that will be executed in every trial of the sweep job. Make sure you've logged the metrics in the trial component source code with exactly the same name as the `primary_metric` value in the pipeline.yml file. In this example, we use `mlflow.autolog()`, which is the recommended way to track your ML experiments. See more about MLflow [here](./how-to-use-mlflow-cli-runs.md). The code snippet below is the source code of the trial component. -:::code language="python" source="~/azureml-examples-sdk-preview/cli/jobs/pipelines-with-components/pipeline_with_hyperparameter_sweep/train-src/train.py" highlight="15"::: +:::code language="python" source="~/azureml-examples-main/cli/jobs/pipelines-with-components/pipeline_with_hyperparameter_sweep/train-src/train.py" highlight="15"::: ### Python SDK @@ -59,7 +59,7 @@ In Azure Machine Learning Python SDK v2, you can enable hyperparameter tuning fo The code snippet below shows how to enable sweep for `train_model`.
-[!notebook-python[] (~/azureml-examples-sdk-preview/sdk/jobs/pipelines/1c_pipeline_with_hyperparameter_sweep/pipeline_with_hyperparameter_sweep.ipynb?name=enable-sweep)] +[!notebook-python[] (~/azureml-examples-main/sdk/jobs/pipelines/1c_pipeline_with_hyperparameter_sweep/pipeline_with_hyperparameter_sweep.ipynb?name=enable-sweep)] We first load `train_component_func` defined in `train.yml` file. When creating `train_model`, we add `c_value`, `kernel` and `coef0` into search space(line 15-17). Line 30-35 defines the primary metric, sampling algorithm etc. diff --git a/articles/machine-learning/media/concept-component/upgrade-component.png b/articles/machine-learning/media/concept-component/upgrade-component.png index 0fe6cd6044ab..120e9efe70c2 100644 Binary files a/articles/machine-learning/media/concept-component/upgrade-component.png and b/articles/machine-learning/media/concept-component/upgrade-component.png differ diff --git a/articles/machine-learning/media/concept-compute-instance/manage-compute-instance.png b/articles/machine-learning/media/concept-compute-instance/manage-compute-instance.png index e70f1de31b7e..cec83c2e779a 100644 Binary files a/articles/machine-learning/media/concept-compute-instance/manage-compute-instance.png and b/articles/machine-learning/media/concept-compute-instance/manage-compute-instance.png differ diff --git a/articles/machine-learning/media/concept-designer/designer-workflow-diagram.png b/articles/machine-learning/media/concept-designer/designer-workflow-diagram.png index b2b842ba239a..4044dce9adfc 100644 Binary files a/articles/machine-learning/media/concept-designer/designer-workflow-diagram.png and b/articles/machine-learning/media/concept-designer/designer-workflow-diagram.png differ diff --git a/articles/machine-learning/media/concept-secure-network-traffic-flow/storage-traffic-studio.png b/articles/machine-learning/media/concept-secure-network-traffic-flow/storage-traffic-studio.png index f1ed2ab1ed2c..bdfb6e48413f 100644 Binary files a/articles/machine-learning/media/concept-secure-network-traffic-flow/storage-traffic-studio.png and b/articles/machine-learning/media/concept-secure-network-traffic-flow/storage-traffic-studio.png differ diff --git a/articles/machine-learning/media/how-to-autoscale-endpoints/configure-autoscale.png b/articles/machine-learning/media/how-to-autoscale-endpoints/configure-autoscale.png index 53d3be8b2db6..e9260832f8c8 100644 Binary files a/articles/machine-learning/media/how-to-autoscale-endpoints/configure-autoscale.png and b/articles/machine-learning/media/how-to-autoscale-endpoints/configure-autoscale.png differ diff --git a/articles/machine-learning/media/how-to-create-attach-studio/create-compute-target.png b/articles/machine-learning/media/how-to-create-attach-studio/create-compute-target.png index 822af6f488c5..c60c389886ec 100644 Binary files a/articles/machine-learning/media/how-to-create-attach-studio/create-compute-target.png and b/articles/machine-learning/media/how-to-create-attach-studio/create-compute-target.png differ diff --git a/articles/machine-learning/media/how-to-create-labeling-projects/exported-dataset.png b/articles/machine-learning/media/how-to-create-labeling-projects/exported-dataset.png index c2926f524654..b14c4504ab74 100644 Binary files a/articles/machine-learning/media/how-to-create-labeling-projects/exported-dataset.png and b/articles/machine-learning/media/how-to-create-labeling-projects/exported-dataset.png differ diff --git 
a/articles/machine-learning/media/how-to-create-your-first-pipeline/pipeline-endpoints.png b/articles/machine-learning/media/how-to-create-your-first-pipeline/pipeline-endpoints.png index 786a48d47c0c..679d66d9000b 100644 Binary files a/articles/machine-learning/media/how-to-create-your-first-pipeline/pipeline-endpoints.png and b/articles/machine-learning/media/how-to-create-your-first-pipeline/pipeline-endpoints.png differ diff --git a/articles/machine-learning/media/how-to-deploy-automl-endpoint/download-model.png b/articles/machine-learning/media/how-to-deploy-automl-endpoint/download-model.png index a3577c73bff4..884f0f827af6 100644 Binary files a/articles/machine-learning/media/how-to-deploy-automl-endpoint/download-model.png and b/articles/machine-learning/media/how-to-deploy-automl-endpoint/download-model.png differ diff --git a/articles/machine-learning/media/how-to-deploy-local/registered-model.png b/articles/machine-learning/media/how-to-deploy-local/registered-model.png index b8d58cc969d8..d07c252fc047 100644 Binary files a/articles/machine-learning/media/how-to-deploy-local/registered-model.png and b/articles/machine-learning/media/how-to-deploy-local/registered-model.png differ diff --git a/articles/machine-learning/media/how-to-deploy-model-designer/models-asset-page.png b/articles/machine-learning/media/how-to-deploy-model-designer/models-asset-page.png index 7de046c6a3df..06d834d8a8b0 100644 Binary files a/articles/machine-learning/media/how-to-deploy-model-designer/models-asset-page.png and b/articles/machine-learning/media/how-to-deploy-model-designer/models-asset-page.png differ diff --git a/articles/machine-learning/media/how-to-deploy-with-triton/ncd-triton.png b/articles/machine-learning/media/how-to-deploy-with-triton/ncd-triton.png index 9eb0c7020a9f..665a0f34d17a 100644 Binary files a/articles/machine-learning/media/how-to-deploy-with-triton/ncd-triton.png and b/articles/machine-learning/media/how-to-deploy-with-triton/ncd-triton.png differ diff --git a/articles/machine-learning/media/how-to-deploy-with-triton/triton-model-format.png b/articles/machine-learning/media/how-to-deploy-with-triton/triton-model-format.png index 32efb8ffefb3..4e5c1af52e82 100644 Binary files a/articles/machine-learning/media/how-to-deploy-with-triton/triton-model-format.png and b/articles/machine-learning/media/how-to-deploy-with-triton/triton-model-format.png differ diff --git a/articles/machine-learning/media/how-to-enable-virtual-network/create-compute-cluster.png b/articles/machine-learning/media/how-to-enable-virtual-network/create-compute-cluster.png index 4addf83192b9..bb84093162ad 100644 Binary files a/articles/machine-learning/media/how-to-enable-virtual-network/create-compute-cluster.png and b/articles/machine-learning/media/how-to-enable-virtual-network/create-compute-cluster.png differ diff --git a/articles/machine-learning/media/how-to-enable-virtual-network/create-inference.png b/articles/machine-learning/media/how-to-enable-virtual-network/create-inference.png index b00d598e6df2..fcda93dec789 100644 Binary files a/articles/machine-learning/media/how-to-enable-virtual-network/create-inference.png and b/articles/machine-learning/media/how-to-enable-virtual-network/create-inference.png differ diff --git a/articles/machine-learning/media/how-to-log-view-metrics/download-logs.png b/articles/machine-learning/media/how-to-log-view-metrics/download-logs.png index feec32a9b975..0fbf48386cb1 100644 Binary files 
a/articles/machine-learning/media/how-to-log-view-metrics/download-logs.png and b/articles/machine-learning/media/how-to-log-view-metrics/download-logs.png differ diff --git a/articles/machine-learning/media/how-to-machine-learning-interpretability-automl/automl-explanation.png b/articles/machine-learning/media/how-to-machine-learning-interpretability-automl/automl-explanation.png index ff2c7945a896..e765c87496a6 100644 Binary files a/articles/machine-learning/media/how-to-machine-learning-interpretability-automl/automl-explanation.png and b/articles/machine-learning/media/how-to-machine-learning-interpretability-automl/automl-explanation.png differ diff --git a/articles/machine-learning/media/how-to-responsible-ai-dashboard-ui/model-page.png b/articles/machine-learning/media/how-to-responsible-ai-dashboard-ui/model-page.png index 9978bd88ef37..1df800e95a3a 100644 Binary files a/articles/machine-learning/media/how-to-responsible-ai-dashboard-ui/model-page.png and b/articles/machine-learning/media/how-to-responsible-ai-dashboard-ui/model-page.png differ diff --git a/articles/machine-learning/media/how-to-retrain-designer/pipeline-endpoint.png b/articles/machine-learning/media/how-to-retrain-designer/pipeline-endpoint.png index 4f534ec00ba0..cb6616c39518 100644 Binary files a/articles/machine-learning/media/how-to-retrain-designer/pipeline-endpoint.png and b/articles/machine-learning/media/how-to-retrain-designer/pipeline-endpoint.png differ diff --git a/articles/machine-learning/media/how-to-run-batch-predictions-designer/rest-endpoint-details.png b/articles/machine-learning/media/how-to-run-batch-predictions-designer/rest-endpoint-details.png index 6286598de62d..47fd537654d4 100644 Binary files a/articles/machine-learning/media/how-to-run-batch-predictions-designer/rest-endpoint-details.png and b/articles/machine-learning/media/how-to-run-batch-predictions-designer/rest-endpoint-details.png differ diff --git a/articles/machine-learning/media/how-to-set-up-training-targets/compute-target-details.png b/articles/machine-learning/media/how-to-set-up-training-targets/compute-target-details.png index a70fcd689f70..0bb0b8dd8b8b 100644 Binary files a/articles/machine-learning/media/how-to-set-up-training-targets/compute-target-details.png and b/articles/machine-learning/media/how-to-set-up-training-targets/compute-target-details.png differ diff --git a/articles/machine-learning/media/how-to-set-up-vs-code-remote/studio-notebook-compute-instance-vs-code-launch.png b/articles/machine-learning/media/how-to-set-up-vs-code-remote/studio-notebook-compute-instance-vs-code-launch.png index aafe87f00187..29b2c3c47ede 100644 Binary files a/articles/machine-learning/media/how-to-set-up-vs-code-remote/studio-notebook-compute-instance-vs-code-launch.png and b/articles/machine-learning/media/how-to-set-up-vs-code-remote/studio-notebook-compute-instance-vs-code-launch.png differ diff --git a/articles/machine-learning/media/how-to-train-with-ui/left-nav-entry.png b/articles/machine-learning/media/how-to-train-with-ui/left-nav-entry.png index dfc4044d39f6..96153ec68841 100644 Binary files a/articles/machine-learning/media/how-to-train-with-ui/left-nav-entry.png and b/articles/machine-learning/media/how-to-train-with-ui/left-nav-entry.png differ diff --git a/articles/machine-learning/media/how-to-trigger-published-pipeline/scheduled-pipelines.png b/articles/machine-learning/media/how-to-trigger-published-pipeline/scheduled-pipelines.png index 4bd5c082391c..d27d9cb5a6ad 100644 Binary files 
a/articles/machine-learning/media/how-to-trigger-published-pipeline/scheduled-pipelines.png and b/articles/machine-learning/media/how-to-trigger-published-pipeline/scheduled-pipelines.png differ diff --git a/articles/machine-learning/media/how-to-use-automated-ml-for-ml-models/hyperparameter-button.png b/articles/machine-learning/media/how-to-use-automated-ml-for-ml-models/hyperparameter-button.png index 1532d9b5bcc0..b16cab3fca1b 100644 Binary files a/articles/machine-learning/media/how-to-use-automated-ml-for-ml-models/hyperparameter-button.png and b/articles/machine-learning/media/how-to-use-automated-ml-for-ml-models/hyperparameter-button.png differ diff --git a/articles/machine-learning/media/how-to-use-automated-ml-for-ml-models/nav-pane-expanded.png b/articles/machine-learning/media/how-to-use-automated-ml-for-ml-models/nav-pane-expanded.png index ae74ffec471b..f776fce9151d 100644 Binary files a/articles/machine-learning/media/how-to-use-automated-ml-for-ml-models/nav-pane-expanded.png and b/articles/machine-learning/media/how-to-use-automated-ml-for-ml-models/nav-pane-expanded.png differ diff --git a/articles/machine-learning/media/how-to-use-automated-ml-for-ml-models/run-details-expanded.png b/articles/machine-learning/media/how-to-use-automated-ml-for-ml-models/run-details-expanded.png index 2ee6ed87593c..d15d1bfb5ee9 100644 Binary files a/articles/machine-learning/media/how-to-use-automated-ml-for-ml-models/run-details-expanded.png and b/articles/machine-learning/media/how-to-use-automated-ml-for-ml-models/run-details-expanded.png differ diff --git a/articles/machine-learning/media/how-to-use-environments/ml-environment.png b/articles/machine-learning/media/how-to-use-environments/ml-environment.png index 2e51a515bab8..fbeb10ea69bb 100644 Binary files a/articles/machine-learning/media/how-to-use-environments/ml-environment.png and b/articles/machine-learning/media/how-to-use-environments/ml-environment.png differ diff --git a/articles/machine-learning/media/how-to-use-labeled-dataset/export-button.png b/articles/machine-learning/media/how-to-use-labeled-dataset/export-button.png index 0cf10b4cfa2a..2f01b6a1184b 100644 Binary files a/articles/machine-learning/media/how-to-use-labeled-dataset/export-button.png and b/articles/machine-learning/media/how-to-use-labeled-dataset/export-button.png differ diff --git a/articles/machine-learning/media/how-to-use-sweep-in-pipeline/pipeline-view.png b/articles/machine-learning/media/how-to-use-sweep-in-pipeline/pipeline-view.png index a3e6ebdca9b0..34083dac9210 100644 Binary files a/articles/machine-learning/media/how-to-use-sweep-in-pipeline/pipeline-view.png and b/articles/machine-learning/media/how-to-use-sweep-in-pipeline/pipeline-view.png differ diff --git a/articles/machine-learning/media/migrate-overview/aml-endpoint.png b/articles/machine-learning/media/migrate-overview/aml-endpoint.png index a2c1f7704084..d44b12b7ad75 100644 Binary files a/articles/machine-learning/media/migrate-overview/aml-endpoint.png and b/articles/machine-learning/media/migrate-overview/aml-endpoint.png differ diff --git a/articles/machine-learning/media/migrate-rebuild-web-service/create-retraining-pipeline.png b/articles/machine-learning/media/migrate-rebuild-web-service/create-retraining-pipeline.png index 308074b2edde..8b1d1533bec9 100644 Binary files a/articles/machine-learning/media/migrate-rebuild-web-service/create-retraining-pipeline.png and b/articles/machine-learning/media/migrate-rebuild-web-service/create-retraining-pipeline.png differ diff --git 
a/articles/machine-learning/media/migrate-register-dataset/register-dataset.png b/articles/machine-learning/media/migrate-register-dataset/register-dataset.png index 8ecaff9f321d..30cbca27ad5f 100644 Binary files a/articles/machine-learning/media/migrate-register-dataset/register-dataset.png and b/articles/machine-learning/media/migrate-register-dataset/register-dataset.png differ diff --git a/articles/machine-learning/media/tutorial-1st-experiment-hello-world/create-folder.png b/articles/machine-learning/media/tutorial-1st-experiment-hello-world/create-folder.png index ae78cae3e7d0..bc56d8bcc40a 100644 Binary files a/articles/machine-learning/media/tutorial-1st-experiment-hello-world/create-folder.png and b/articles/machine-learning/media/tutorial-1st-experiment-hello-world/create-folder.png differ diff --git a/articles/machine-learning/media/tutorial-1st-experiment-sdk-setup/clone-tutorials.png b/articles/machine-learning/media/tutorial-1st-experiment-sdk-setup/clone-tutorials.png index 3c0c71344759..320a5e745f8d 100644 Binary files a/articles/machine-learning/media/tutorial-1st-experiment-sdk-setup/clone-tutorials.png and b/articles/machine-learning/media/tutorial-1st-experiment-sdk-setup/clone-tutorials.png differ diff --git a/articles/machine-learning/media/tutorial-1st-experiment-sdk-train/model-download.png b/articles/machine-learning/media/tutorial-1st-experiment-sdk-train/model-download.png index 51a788e8868e..0d436a068d5c 100644 Binary files a/articles/machine-learning/media/tutorial-1st-experiment-sdk-train/model-download.png and b/articles/machine-learning/media/tutorial-1st-experiment-sdk-train/model-download.png differ diff --git a/articles/machine-learning/media/tutorial-first-experiment-automated-ml/get-started.png b/articles/machine-learning/media/tutorial-first-experiment-automated-ml/get-started.png index ecc5989d682b..f21679a7746a 100644 Binary files a/articles/machine-learning/media/tutorial-first-experiment-automated-ml/get-started.png and b/articles/machine-learning/media/tutorial-first-experiment-automated-ml/get-started.png differ diff --git a/articles/machine-learning/media/tutorial-power-bi/create-new-notebook.png b/articles/machine-learning/media/tutorial-power-bi/create-new-notebook.png index 8e035609e921..2a291518ea6c 100644 Binary files a/articles/machine-learning/media/tutorial-power-bi/create-new-notebook.png and b/articles/machine-learning/media/tutorial-power-bi/create-new-notebook.png differ diff --git a/articles/machine-learning/media/tutorial-power-bi/create-new-run.png b/articles/machine-learning/media/tutorial-power-bi/create-new-run.png index da983ff3f6d0..bc21939ef9e6 100644 Binary files a/articles/machine-learning/media/tutorial-power-bi/create-new-run.png and b/articles/machine-learning/media/tutorial-power-bi/create-new-run.png differ diff --git a/articles/machine-learning/media/tutorial-power-bi/endpoint.png b/articles/machine-learning/media/tutorial-power-bi/endpoint.png index afb25fd620db..052f1b51477c 100644 Binary files a/articles/machine-learning/media/tutorial-power-bi/endpoint.png and b/articles/machine-learning/media/tutorial-power-bi/endpoint.png differ diff --git a/articles/machine-learning/toc.yml b/articles/machine-learning/toc.yml index 6fa3ed694d97..e9357e8a0aaa 100644 --- a/articles/machine-learning/toc.yml +++ b/articles/machine-learning/toc.yml @@ -312,9 +312,6 @@ - name: Kubernetes cluster displayName: Azure Arc, Kubernetes, on-premise, multi-cloud href: how-to-attach-kubernetes-anywhere.md - - name: Azure Kubernetes Service - 
displayName: AKS, inference - href: how-to-create-attach-kubernetes.md - name: Use studio displayName: compute target, dsvm, Data Science Virtual Machine, local, cluster, ACI, container instance, Databricks, data lake, lake, HDI, HDInsight, low priority, managed identity href: how-to-create-attach-compute-studio.md @@ -489,10 +486,14 @@ items: - name: Online endpoints (real-time) items: - - name: Deploy an ML model with an online endpoint + - name: Deploy an ML model with an online endpoint (CLI) href: how-to-deploy-managed-online-endpoints.md - - name: Safe rollout for online endpoints + - name: Deploy an ML model with an online endpoint (SDK preview) + href: how-to-deploy-managed-online-endpoint-sdk-v2.md + - name: Safe rollout for online endpoints (CLI) href: how-to-safely-rollout-managed-endpoints.md + - name: Safe rollout for online endpoints (SDK preview) + href: how-to-safely-rollout-managed-endpoints-sdk-v2.md - name: Deployment scenarios items: - name: Deploy a MLflow model with an online endpoint @@ -507,7 +508,7 @@ - name: Use REST to deploy a model as an online endpoint href: how-to-deploy-with-rest.md - name: Deploy an AutoML model with an online endpoint - href: how-to-deploy-automl-endpoint.md + href: how-to-deploy-automl-endpoint.md - name: Security items: - name: Authenticate to endpoints @@ -531,8 +532,10 @@ href: how-to-troubleshoot-online-endpoints.md - name: Batch endpoints items: - - name: Batch scoring with batch endpoints + - name: Batch scoring with batch endpoints (CLI) href: how-to-use-batch-endpoint.md + - name: Batch scoring with batch endpoints (SDK preview) + href: how-to-use-batch-endpoint-sdk-v2.md - name: Batch endpoints in studio href: how-to-use-batch-endpoints-studio.md - name: Use REST to deploy a model as a batch endpoint diff --git a/articles/machine-learning/tutorial-auto-train-image-models.md b/articles/machine-learning/tutorial-auto-train-image-models.md index 735dda33ab04..33040a8864d9 100644 --- a/articles/machine-learning/tutorial-auto-train-image-models.md +++ b/articles/machine-learning/tutorial-auto-train-image-models.md @@ -9,7 +9,7 @@ ms.topic: tutorial author: swatig007 ms.author: swatig ms.reviewer: nibaccam -ms.date: 04/15/2022 +ms.date: 05/26/2022 ms.custom: devx-track-python, automl, event-tier1-build-2022 --- @@ -253,13 +253,13 @@ az ml data create -f [PATH_TO_YML_FILE] --workspace-name [YOUR_AZURE_WORKSPACE] ``` # [Python SDK v2 (preview)](#tab/SDK-v2) -[!Notebook-python[] (~/azureml-examples-sdk-preview/sdk/jobs/automl-standalone-jobs/automl-image-object-detection-task-fridge-items/automl-image-object-detection-task-fridge-items.ipynb?name=upload-data)] +[!Notebook-python[] (~/azureml-examples-main/sdk/jobs/automl-standalone-jobs/automl-image-object-detection-task-fridge-items/automl-image-object-detection-task-fridge-items.ipynb?name=upload-data)] --- Next step is to create `MLTable` from your data in jsonl format as shown below. MLtable package your data into a consumable object for training. 
-:::code language="yaml" source="~/azureml-examples-sdk-preview/sdk/jobs/automl-standalone-jobs/automl-image-object-detection-task-fridge-items/data/training-mltable-folder/MLTable"::: +:::code language="yaml" source="~/azureml-examples-main/sdk/jobs/automl-standalone-jobs/automl-image-object-detection-task-fridge-items/data/training-mltable-folder/MLTable"::: # [CLI v2](#tab/CLI-v2) [!INCLUDE [cli v2](../../includes/machine-learning-cli-v2.md)] @@ -280,7 +280,7 @@ validation_data: You can create data inputs from training and validation MLTable with the following code: -[!Notebook-python[] (~/azureml-examples-sdk-preview/sdk/jobs/automl-standalone-jobs/automl-image-object-detection-task-fridge-items/automl-image-object-detection-task-fridge-items.ipynb?name=data-load)] +[!Notebook-python[] (~/azureml-examples-main/sdk/jobs/automl-standalone-jobs/automl-image-object-detection-task-fridge-items/automl-image-object-detection-task-fridge-items.ipynb?name=data-load)] --- @@ -298,7 +298,7 @@ primary_metric: mean_average_precision # [Python SDK v2 (preview)](#tab/SDK-v2) -[!Notebook-python[] (~/azureml-examples-sdk-preview/sdk/jobs/automl-standalone-jobs/automl-image-object-detection-task-fridge-items/automl-image-object-detection-task-fridge-items.ipynb?name=image-object-detection-configuration)] +[!Notebook-python[] (~/azureml-examples-main/sdk/jobs/automl-standalone-jobs/automl-image-object-detection-task-fridge-items/automl-image-object-detection-task-fridge-items.ipynb?name=image-object-detection-configuration)] --- @@ -346,9 +346,9 @@ search_space: # [Python SDK v2 (preview)](#tab/SDK-v2) -[!Notebook-python[] (~/azureml-examples-sdk-preview/sdk/jobs/automl-standalone-jobs/automl-image-object-detection-task-fridge-items/automl-image-object-detection-task-fridge-items.ipynb?name=sweep-settings)] +[!Notebook-python[] (~/azureml-examples-main/sdk/jobs/automl-standalone-jobs/automl-image-object-detection-task-fridge-items/automl-image-object-detection-task-fridge-items.ipynb?name=sweep-settings)] -[!Notebook-python[] (~/azureml-examples-sdk-preview/sdk/jobs/automl-standalone-jobs/automl-image-object-detection-task-fridge-items/automl-image-object-detection-task-fridge-items.ipynb?name=search-space-settings)] +[!Notebook-python[] (~/azureml-examples-main/sdk/jobs/automl-standalone-jobs/automl-image-object-detection-task-fridge-items/automl-image-object-detection-task-fridge-items.ipynb?name=search-space-settings)] --- @@ -368,7 +368,7 @@ az ml job create --file ./hello-automl-job-basic.yml --workspace-name [YOUR_AZUR When you've configured your AutoML Job to the desired settings, you can submit the job. 
-[!Notebook-python[] (~/azureml-examples-sdk-preview/sdk/jobs/automl-standalone-jobs/automl-image-object-detection-task-fridge-items/automl-image-object-detection-task-fridge-items.ipynb?name=submit-run)] +[!Notebook-python[] (~/azureml-examples-main/sdk/jobs/automl-standalone-jobs/automl-image-object-detection-task-fridge-items/automl-image-object-detection-task-fridge-items.ipynb?name=submit-run)] --- diff --git a/articles/machine-learning/v1/how-to-access-data.md b/articles/machine-learning/v1/how-to-access-data.md index 3c1437472b02..6ffcd0498088 100644 --- a/articles/machine-learning/v1/how-to-access-data.md +++ b/articles/machine-learning/v1/how-to-access-data.md @@ -72,10 +72,10 @@ Datastores currently support storing connection information to the storage servi | Storage type | Authentication type | [Azure Machine Learning studio](https://ml.azure.com/) | [Azure Machine Learning  Python SDK](/python/api/overview/azure/ml/intro) | [Azure Machine Learning CLI](reference-azure-machine-learning-cli.md) | [Azure Machine Learning  REST API](/rest/api/azureml/) | VS Code ---|---|---|---|---|---|--- -[Azure Blob Storage](/azure/storage/blobs/storage-blobs-overview)| Account key
    SAS token | ✓ | ✓ | ✓ |✓ |✓ -[Azure File Share](/azure/storage/files/storage-files-introduction)| Account key
    SAS token | ✓ | ✓ | ✓ |✓|✓ -[Azure Data Lake Storage Gen 1](/azure/data-lake-store/)| Service principal| ✓ | ✓ | ✓ |✓| -[Azure Data Lake Storage Gen 2](/azure/storage/blobs/data-lake-storage-introduction)| Service principal| ✓ | ✓ | ✓ |✓| +[Azure Blob Storage](../../storage/blobs/storage-blobs-overview.md)| Account key
    SAS token | ✓ | ✓ | ✓ |✓ |✓ +[Azure File Share](../../storage/files/storage-files-introduction.md)| Account key
    SAS token | ✓ | ✓ | ✓ |✓|✓ +[Azure Data Lake Storage Gen 1](../../data-lake-store/index.yml)| Service principal| ✓ | ✓ | ✓ |✓| +[Azure Data Lake Storage Gen 2](../../storage/blobs/data-lake-storage-introduction.md)| Service principal| ✓ | ✓ | ✓ |✓| [Azure SQL Database](/azure/azure-sql/database/sql-database-paas-overview)| SQL authentication
    Service principal| ✓ | ✓ | ✓ |✓| [Azure PostgreSQL](/azure/postgresql/overview) | SQL authentication| ✓ | ✓ | ✓ |✓| [Azure Database for MySQL](/azure/mysql/overview) | SQL authentication| | ✓* | ✓* |✓*| @@ -87,9 +87,9 @@ Datastores currently support storing connection information to the storage servi ### Storage guidance -We recommend creating a datastore for an [Azure Blob container](/azure/storage/blobs/storage-blobs-introduction). Both standard and premium storage are available for blobs. Although premium storage is more expensive, its faster throughput speeds might improve the speed of your training runs, particularly if you train against a large dataset. For information about the cost of storage accounts, see the [Azure pricing calculator](https://azure.microsoft.com/pricing/calculator/?service=machine-learning-service). +We recommend creating a datastore for an [Azure Blob container](../../storage/blobs/storage-blobs-introduction.md). Both standard and premium storage are available for blobs. Although premium storage is more expensive, its faster throughput speeds might improve the speed of your training runs, particularly if you train against a large dataset. For information about the cost of storage accounts, see the [Azure pricing calculator](https://azure.microsoft.com/pricing/calculator/?service=machine-learning-service). -[Azure Data Lake Storage Gen2](/azure/storage/blobs/data-lake-storage-introduction) is built on top of Azure Blob storage and designed for enterprise big data analytics. A fundamental part of Data Lake Storage Gen2 is the addition of a [hierarchical namespace](/azure/storage/blobs/data-lake-storage-namespace) to Blob storage. The hierarchical namespace organizes objects/files into a hierarchy of directories for efficient data access. +[Azure Data Lake Storage Gen2](../../storage/blobs/data-lake-storage-introduction.md) is built on top of Azure Blob storage and designed for enterprise big data analytics. A fundamental part of Data Lake Storage Gen2 is the addition of a [hierarchical namespace](../../storage/blobs/data-lake-storage-namespace.md) to Blob storage. The hierarchical namespace organizes objects/files into a hierarchy of directories for efficient data access. ## Storage access and permissions @@ -100,7 +100,7 @@ To ensure you securely connect to your Azure storage service, Azure Machine Lear ### Virtual network -Azure Machine Learning requires extra configuration steps to communicate with a storage account that is behind a firewall or within a virtual network. If your storage account is behind a firewall, you can [add your client's IP address to an allowlist](/azure/storage/common/storage-network-security#managing-ip-network-rules) via the Azure portal. +Azure Machine Learning requires extra configuration steps to communicate with a storage account that is behind a firewall or within a virtual network. If your storage account is behind a firewall, you can [add your client's IP address to an allowlist](../../storage/common/storage-network-security.md#managing-ip-network-rules) via the Azure portal. Azure Machine Learning can receive requests from clients outside of the virtual network. To ensure that the entity requesting data from the service is safe and to enable data being displayed in your workspace, [use a private endpoint with your workspace](../how-to-configure-private-link.md). 
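As a hedged illustration of the credential-based pattern summarized in the table above (Azure ML SDK v1, with placeholder datastore, container, and storage account names), registering a blob container with an account key might look like this:

```python
# Hedged sketch (SDK v1): register an Azure Blob container as a credential-based datastore.
# The datastore, container, and storage account names are placeholders.
from azureml.core import Workspace, Datastore

ws = Workspace.from_config()

blob_datastore = Datastore.register_azure_blob_container(
    workspace=ws,
    datastore_name="my_blob_datastore",
    container_name="my-container",
    account_name="mystorageaccount",
    account_key="<account-key>",  # alternatively, pass sas_token="<sas-token>"
)
```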
@@ -137,7 +137,7 @@ You can find account key, SAS token, and service principal information on your [ ### Permissions -For Azure blob container and Azure Data Lake Gen 2 storage, make sure your authentication credentials have **Storage Blob Data Reader** access. Learn more about [Storage Blob Data Reader](/azure/role-based-access-control/built-in-roles#storage-blob-data-reader). An account SAS token defaults to no permissions. +For Azure blob container and Azure Data Lake Gen 2 storage, make sure your authentication credentials have **Storage Blob Data Reader** access. Learn more about [Storage Blob Data Reader](../../role-based-access-control/built-in-roles.md#storage-blob-data-reader). An account SAS token defaults to no permissions. * For data **read access**, your authentication credentials must have a minimum of list and read permissions for containers and objects. * For data **write access**, write and add permissions also are required. @@ -158,7 +158,7 @@ Within this section are examples for how to create and register a datastore via If you prefer a low code experience, see [Connect to data with Azure Machine Learning studio](../how-to-connect-data-ui.md). >[!IMPORTANT] -> If you unregister and re-register a datastore with the same name, and it fails, the Azure Key Vault for your workspace may not have soft-delete enabled. By default, soft-delete is enabled for the key vault instance created by your workspace, but it may not be enabled if you used an existing key vault or have a workspace created prior to October 2020. For information on how to enable soft-delete, see [Turn on Soft Delete for an existing key vault](/azure/key-vault/general/soft-delete-change#turn-on-soft-delete-for-an-existing-key-vault). +> If you unregister and re-register a datastore with the same name, and it fails, the Azure Key Vault for your workspace may not have soft-delete enabled. By default, soft-delete is enabled for the key vault instance created by your workspace, but it may not be enabled if you used an existing key vault or have a workspace created prior to October 2020. For information on how to enable soft-delete, see [Turn on Soft Delete for an existing key vault](../../key-vault/general/soft-delete-change.md#turn-on-soft-delete-for-an-existing-key-vault). > [!NOTE] @@ -204,9 +204,9 @@ file_datastore = Datastore.register_azure_file_share(workspace=ws, ### Azure Data Lake Storage Generation 2 -For an Azure Data Lake Storage Generation 2 (ADLS Gen 2) datastore, use [register_azure_data_lake_gen2()](/python/api/azureml-core/azureml.core.datastore.datastore#register-azure-data-lake-gen2-workspace--datastore-name--filesystem--account-name--tenant-id--client-id--client-secret--resource-url-none--authority-url-none--protocol-none--endpoint-none--overwrite-false-) to register a credential datastore connected to an Azure DataLake Gen 2 storage with [service principal permissions](/azure/active-directory/develop/howto-create-service-principal-portal). +For an Azure Data Lake Storage Generation 2 (ADLS Gen 2) datastore, use [register_azure_data_lake_gen2()](/python/api/azureml-core/azureml.core.datastore.datastore#register-azure-data-lake-gen2-workspace--datastore-name--filesystem--account-name--tenant-id--client-id--client-secret--resource-url-none--authority-url-none--protocol-none--endpoint-none--overwrite-false-) to register a credential datastore connected to an Azure DataLake Gen 2 storage with [service principal permissions](../../active-directory/develop/howto-create-service-principal-portal.md). 
-In order to utilize your service principal, you need to [register your application](/azure/active-directory/develop/app-objects-and-service-principals) and grant the service principal data access via either Azure role-based access control (Azure RBAC) or access control lists (ACL). Learn more about [access control set up for ADLS Gen 2](/azure/storage/blobs/data-lake-storage-access-control-model). +In order to utilize your service principal, you need to [register your application](../../active-directory/develop/app-objects-and-service-principals.md) and grant the service principal data access via either Azure role-based access control (Azure RBAC) or access control lists (ACL). Learn more about [access control set up for ADLS Gen 2](../../storage/blobs/data-lake-storage-access-control-model.md). The following code creates and registers the `adlsgen2_datastore_name` datastore to the `ws` workspace. This datastore accesses the file system `test` in the `account_name` storage account, by using the provided service principal credentials. Review the [storage access & permissions](#storage-access-and-permissions) section for guidance on virtual network scenarios, and where to find required authentication credentials. @@ -304,4 +304,4 @@ Azure Data Factory provides efficient and resilient data transfer with more than * [Create an Azure machine learning dataset](how-to-create-register-datasets.md) * [Train a model](../how-to-set-up-training-targets.md) -* [Deploy a model](../how-to-deploy-and-where.md) +* [Deploy a model](../how-to-deploy-and-where.md) \ No newline at end of file diff --git a/articles/machine-learning/v1/how-to-create-register-datasets.md b/articles/machine-learning/v1/how-to-create-register-datasets.md index b1184de7c958..7a58d4493f62 100644 --- a/articles/machine-learning/v1/how-to-create-register-datasets.md +++ b/articles/machine-learning/v1/how-to-create-register-datasets.md @@ -24,7 +24,7 @@ ms.date: 05/11/2022 In this article, you learn how to create Azure Machine Learning datasets to access data for your local or remote experiments with the Azure Machine Learning Python SDK. To understand where datasets fit in Azure Machine Learning's overall data access workflow, see the [Securely access data](concept-data.md#data-workflow) article. -By creating a dataset, you create a reference to the data source location, along with a copy of its metadata. Because the data remains in its existing location, you incur no extra storage cost, and don't risk the integrity of your data sources. Also datasets are lazily evaluated, which aids in workflow performance speeds. You can create datasets from datastores, public URLs, and [Azure Open Datasets](/azure/open-datasets/how-to-create-azure-machine-learning-dataset-from-open-dataset). +By creating a dataset, you create a reference to the data source location, along with a copy of its metadata. Because the data remains in its existing location, you incur no extra storage cost, and don't risk the integrity of your data sources. Also datasets are lazily evaluated, which aids in workflow performance speeds. You can create datasets from datastores, public URLs, and [Azure Open Datasets](../../open-datasets/how-to-create-azure-machine-learning-dataset-from-open-dataset.md). 
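For example, a minimal hedged sketch with the v1 SDK (placeholder datastore name and file path) of creating and registering a `TabularDataset` from a registered datastore:

```python
# Hedged sketch (SDK v1): create a TabularDataset from a datastore path and register it.
# The datastore name and file path are placeholders.
from azureml.core import Workspace, Datastore, Dataset

ws = Workspace.from_config()
datastore = Datastore.get(ws, "my_blob_datastore")

titanic_ds = Dataset.Tabular.from_delimited_files(path=[(datastore, "data/titanic.csv")])
titanic_ds = titanic_ds.register(
    workspace=ws, name="titanic_ds", description="sample tabular dataset"
)
```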
For a low-code experience, [Create Azure Machine Learning datasets with the Azure Machine Learning studio.](../how-to-connect-data-ui.md#create-datasets) @@ -105,7 +105,7 @@ For the data to be accessible by Azure Machine Learning, datasets must be create To create datasets from a datastore with the Python SDK: -1. Verify that you have `contributor` or `owner` access to the underlying storage service of your registered Azure Machine Learning datastore. [Check your storage account permissions in the Azure portal](/azure/role-based-access-control/check-access). +1. Verify that you have `contributor` or `owner` access to the underlying storage service of your registered Azure Machine Learning datastore. [Check your storage account permissions in the Azure portal](../../role-based-access-control/check-access.md). 1. Create the dataset by referencing paths in the datastore. You can create a dataset from multiple paths in multiple datastores. There is no hard limit on the number of files or data size that you can create a dataset from. @@ -388,4 +388,4 @@ titanic_ds = titanic_ds.register(workspace = workspace, * Learn [how to train with datasets](../how-to-train-with-datasets.md). * Use automated machine learning to [train with TabularDatasets](https://github.com/Azure/MachineLearningNotebooks/blob/master/how-to-use-azureml/automated-machine-learning/forecasting-energy-demand/auto-ml-forecasting-energy-demand.ipynb). -* For more dataset training examples, see the [sample notebooks](https://github.com/Azure/MachineLearningNotebooks/tree/master/how-to-use-azureml/work-with-data/). +* For more dataset training examples, see the [sample notebooks](https://github.com/Azure/MachineLearningNotebooks/tree/master/how-to-use-azureml/work-with-data/). \ No newline at end of file diff --git a/articles/machine-learning/v1/how-to-identity-based-data-access.md b/articles/machine-learning/v1/how-to-identity-based-data-access.md index 4c50bfb71b4e..0c68bbc2951b 100644 --- a/articles/machine-learning/v1/how-to-identity-based-data-access.md +++ b/articles/machine-learning/v1/how-to-identity-based-data-access.md @@ -18,7 +18,7 @@ ms.custom: contperf-fy21q1, devx-track-python, data4ml In this article, you learn how to connect to storage services on Azure by using identity-based data access and Azure Machine Learning datastores via the [Azure Machine Learning SDK for Python](/python/api/overview/azure/ml/intro). -Typically, datastores use **credential-based authentication** to confirm you have permission to access the storage service. They keep connection information, like your subscription ID and token authorization, in the [key vault](https://azure.microsoft.com/services/key-vault/) that's associated with the workspace. When you create a datastore that uses **identity-based data access**, your Azure account ([Azure Active Directory token](/azure/active-directory/fundamentals/active-directory-whatis)) is used to confirm you have permission to access the storage service. In the **identity-based data access** scenario, no authentication credentials are saved. Only the storage account information is stored in the datastore. +Typically, datastores use **credential-based authentication** to confirm you have permission to access the storage service. They keep connection information, like your subscription ID and token authorization, in the [key vault](https://azure.microsoft.com/services/key-vault/) that's associated with the workspace. 
When you create a datastore that uses **identity-based data access**, your Azure account ([Azure Active Directory token](../../active-directory/fundamentals/active-directory-whatis.md)) is used to confirm you have permission to access the storage service. In the **identity-based data access** scenario, no authentication credentials are saved. Only the storage account information is stored in the datastore. To create datastores with **identity-based** data access via the Azure Machine Learning studio UI, see [Connect to data with the Azure Machine Learning studio](../how-to-connect-data-ui.md#create-datastores). @@ -59,9 +59,9 @@ Certain machine learning scenarios involve training models with private data. In - An Azure subscription. If you don't have an Azure subscription, create a free account before you begin. Try the [free or paid version of Azure Machine Learning](https://azure.microsoft.com/free/). - An Azure storage account with a supported storage type. These storage types are supported: - - [Azure Blob Storage](/azure/storage/blobs/storage-blobs-overview) - - [Azure Data Lake Storage Gen1](/azure/data-lake-store/) - - [Azure Data Lake Storage Gen2](/azure/storage/blobs/data-lake-storage-introduction) + - [Azure Blob Storage](../../storage/blobs/storage-blobs-overview.md) + - [Azure Data Lake Storage Gen1](../../data-lake-store/index.yml) + - [Azure Data Lake Storage Gen2](../../storage/blobs/data-lake-storage-introduction.md) - [Azure SQL Database](/azure/azure-sql/database/sql-database-paas-overview) - The [Azure Machine Learning SDK for Python](/python/api/overview/azure/ml/install). @@ -153,7 +153,7 @@ Identity-based data access supports connections to **only** the following storag * Azure Data Lake Storage Gen2 * Azure SQL Database -To access these storage services, you must have at least [Storage Blob Data Reader](/azure/role-based-access-control/built-in-roles#storage-blob-data-reader) access to the storage account. Only storage account owners can [change your access level via the Azure portal](/azure/storage/blobs/assign-azure-role-data-access). +To access these storage services, you must have at least [Storage Blob Data Reader](../../role-based-access-control/built-in-roles.md#storage-blob-data-reader) access to the storage account. Only storage account owners can [change your access level via the Azure portal](../../storage/blobs/assign-azure-role-data-access.md). If you prefer to not use your user identity (Azure Active Directory), you also have the option to grant a workspace managed-system identity (MSI) permission to create the datastore. To do so, you must have Owner permissions to the storage account and add the `grant_workspace_access= True` parameter to your data register method. @@ -247,4 +247,4 @@ identity: * [Create an Azure Machine Learning dataset](how-to-create-register-datasets.md) * [Train with datasets](../how-to-train-with-datasets.md) -* [Create a datastore with key-based data access](how-to-access-data.md) +* [Create a datastore with key-based data access](how-to-access-data.md) \ No newline at end of file diff --git a/articles/machine-learning/v1/how-to-track-monitor-analyze-runs.md b/articles/machine-learning/v1/how-to-track-monitor-analyze-runs.md index 780409e66928..c13134ccfbe2 100644 --- a/articles/machine-learning/v1/how-to-track-monitor-analyze-runs.md +++ b/articles/machine-learning/v1/how-to-track-monitor-analyze-runs.md @@ -441,7 +441,7 @@ root_run(current_child_run).log("MyMetric", f"Data from child run {current_child 1. 
In the **Destination details**, select the **Send to Log Analytics workspace** and specify the **Subscription** and **Log Analytics workspace**. > [!NOTE] - > The **Azure Log Analytics Workspace** is a different type of Azure Resource than the **Azure Machine Learning service Workspace**. If there are no options in that list, you can [create a Log Analytics Workspace](/azure/azure-monitor/logs/quick-create-workspace). + > The **Azure Log Analytics Workspace** is a different type of Azure Resource than the **Azure Machine Learning service Workspace**. If there are no options in that list, you can [create a Log Analytics Workspace](../../azure-monitor/logs/quick-create-workspace.md). ![Screenshot of configuring the email notification.](./media/how-to-track-monitor-analyze-runs/log-location.png) @@ -449,7 +449,7 @@ root_run(current_child_run).log("MyMetric", f"Data from child run {current_child ![Screeenshot of the new alert rule.](./media/how-to-track-monitor-analyze-runs/new-alert-rule.png) -1. See [how to create and manage log alerts using Azure Monitor](/azure/azure-monitor/alerts/alerts-log). +1. See [how to create and manage log alerts using Azure Monitor](../../azure-monitor/alerts/alerts-log.md). ## Example notebooks @@ -462,4 +462,4 @@ The following notebooks demonstrate the concepts in this article: ## Next steps * To learn how to log metrics for your experiments, see [Log metrics during training runs](../how-to-log-view-metrics.md). -* To learn how to monitor resources and logs from Azure Machine Learning, see [Monitoring Azure Machine Learning](../monitor-azure-machine-learning.md). +* To learn how to monitor resources and logs from Azure Machine Learning, see [Monitoring Azure Machine Learning](../monitor-azure-machine-learning.md). \ No newline at end of file diff --git a/articles/managed-grafana/how-to-api-calls.md b/articles/managed-grafana/how-to-api-calls.md index 48449e5721c3..2dacd0645c1a 100644 --- a/articles/managed-grafana/how-to-api-calls.md +++ b/articles/managed-grafana/how-to-api-calls.md @@ -16,7 +16,7 @@ In this article, you'll learn how to call Grafana APIs within Azure Managed Graf ## Prerequisites - An Azure account with an active subscription. [Create an account for free](https://azure.microsoft.com/free/dotnet). -- An Azure Managed Grafana workspace. If you don't have one yet, [create a workspace](/azure/managed-grafana/quickstart-managed-grafana-portal). +- An Azure Managed Grafana workspace. If you don't have one yet, [create a workspace](./quickstart-managed-grafana-portal.md). ## Sign in to Azure @@ -70,4 +70,4 @@ Replace `` with the access token retrieved in the previous step an ## Next steps > [!div class="nextstepaction"] -> [Grafana UI](./grafana-app-ui.md) +> [Grafana UI](./grafana-app-ui.md) \ No newline at end of file diff --git a/articles/managed-grafana/how-to-data-source-plugins-managed-identity.md b/articles/managed-grafana/how-to-data-source-plugins-managed-identity.md index aa234f5f1896..d48d2c21c203 100644 --- a/articles/managed-grafana/how-to-data-source-plugins-managed-identity.md +++ b/articles/managed-grafana/how-to-data-source-plugins-managed-identity.md @@ -13,7 +13,7 @@ ms.date: 3/31/2022 ## Prerequisites - An Azure account with an active subscription. [Create an account for free](https://azure.microsoft.com/free/dotnet). -- An Azure Managed Grafana workspace. If you don't have one yet, [create a workspace](/azure/managed-grafana/how-to-permissions). +- An Azure Managed Grafana workspace. 
If you don't have one yet, [create a workspace](./how-to-permissions.md). - A resource including monitoring data with Managed Grafana monitoring permissions. Read [how to configure permissions](how-to-permissions.md) for more information. ## Sign in to Azure @@ -70,4 +70,4 @@ Authentication and authorization are subsequently made through the provided mana > [!div class="nextstepaction"] > [Modify access permissions to Azure Monitor](./how-to-permissions.md) -> [Share an Azure Managed Grafana workspace](./how-to-share-grafana-workspace.md) +> [Share an Azure Managed Grafana workspace](./how-to-share-grafana-workspace.md) \ No newline at end of file diff --git a/articles/managed-grafana/how-to-monitor-managed-grafana-workspace.md b/articles/managed-grafana/how-to-monitor-managed-grafana-workspace.md index be3b26e69f3d..b92ee6abfe82 100644 --- a/articles/managed-grafana/how-to-monitor-managed-grafana-workspace.md +++ b/articles/managed-grafana/how-to-monitor-managed-grafana-workspace.md @@ -15,7 +15,7 @@ In this article, you'll learn how to monitor an Azure Managed Grafana Preview wo ## Prerequisites - An Azure account with an active subscription. [Create an account for free](https://azure.microsoft.com/free/dotnet). -- An Azure Managed Grafana workspace with access to at least one data source. If you don't have a workspace yet, [create an Azure Managed Grafana workspace](/azure/managed-grafana/how-to-permissions) and [add a data source](how-to-data-source-plugins-managed-identity.md). +- An Azure Managed Grafana workspace with access to at least one data source. If you don't have a workspace yet, [create an Azure Managed Grafana workspace](./how-to-permissions.md) and [add a data source](how-to-data-source-plugins-managed-identity.md). ## Sign in to Azure @@ -73,4 +73,4 @@ Now that you've configured your diagnostic settings, Azure will stream all new e > [!div class="nextstepaction"] > [Grafana UI](./grafana-app-ui.md) -> [How to share an Azure Managed Grafana workspace](./how-to-share-grafana-workspace.md) +> [How to share an Azure Managed Grafana workspace](./how-to-share-grafana-workspace.md) \ No newline at end of file diff --git a/articles/managed-grafana/how-to-permissions.md b/articles/managed-grafana/how-to-permissions.md index 05ddbf6ddfdc..a3ca9162d592 100644 --- a/articles/managed-grafana/how-to-permissions.md +++ b/articles/managed-grafana/how-to-permissions.md @@ -19,7 +19,7 @@ In this article, you'll learn how to manually edit permissions for a specific re ## Prerequisites - An Azure account with an active subscription. [Create an account for free](https://azure.microsoft.com/free/dotnet). -- An Azure Managed Grafana workspace. If you don't have one yet, [create a workspace](/azure/managed-grafana/quickstart-managed-grafana-portal). +- An Azure Managed Grafana workspace. If you don't have one yet, [create a workspace](./quickstart-managed-grafana-portal.md). 
- An Azure resource with monitoring data and write permissions, such as [User Access Administrator](../../articles/role-based-access-control/built-in-roles.md#user-access-administrator) or [Owner](../../articles/role-based-access-control/built-in-roles.md#owner) ## Sign in to Azure @@ -57,4 +57,4 @@ To change permissions for a specific resource, follow these steps: ## Next steps > [!div class="nextstepaction"] -> [How to configure data sources for Azure Managed Grafana](./how-to-data-source-plugins-managed-identity.md) +> [How to configure data sources for Azure Managed Grafana](./how-to-data-source-plugins-managed-identity.md) \ No newline at end of file diff --git a/articles/managed-grafana/how-to-share-grafana-workspace.md b/articles/managed-grafana/how-to-share-grafana-workspace.md index a6f19fb4e6a3..5f48d734150c 100644 --- a/articles/managed-grafana/how-to-share-grafana-workspace.md +++ b/articles/managed-grafana/how-to-share-grafana-workspace.md @@ -15,7 +15,7 @@ A DevOps team may build dashboards to monitor and diagnose an application or inf ## Prerequisites - An Azure account with an active subscription. [Create an account for free](https://azure.microsoft.com/free/dotnet). -- An Azure Managed Grafana workspace. If you don't have one yet, [create a workspace](/azure/managed-grafana/how-to-permissions). +- An Azure Managed Grafana workspace. If you don't have one yet, [create a workspace](./how-to-permissions.md). ## Supported Grafana roles @@ -60,4 +60,4 @@ Sign in to the Azure portal at [https://portal.azure.com/](https://portal.azure. > [!div class="nextstepaction"] > [How to configure data sources for Azure Managed Grafana](./how-to-data-source-plugins-managed-identity.md) > [How to modify access permissions to Azure Monitor](./how-to-permissions.md) -> [How to call Grafana APIs in your automation with Azure Managed Grafana](./how-to-api-calls.md) +> [How to call Grafana APIs in your automation with Azure Managed Grafana](./how-to-api-calls.md) \ No newline at end of file diff --git a/articles/managed-grafana/overview.md b/articles/managed-grafana/overview.md index 6608caf8bb67..5594d876516e 100644 --- a/articles/managed-grafana/overview.md +++ b/articles/managed-grafana/overview.md @@ -14,7 +14,7 @@ Azure Managed Grafana is a data visualization platform built on top of the Grafa Azure Managed Grafana is optimized for the Azure environment. It works seamlessly with many Azure services. Specifically, for the current preview, it provides with the following integration features: -* Built-in support for [Azure Monitor](/azure/azure-monitor/) and [Azure Data Explorer](/azure/data-explorer/) +* Built-in support for [Azure Monitor](../azure-monitor/index.yml) and [Azure Data Explorer](/azure/data-explorer/) * User authentication and access control using Azure Active Directory identities * Direct import of existing charts from Azure portal @@ -35,4 +35,4 @@ You can create dashboards instantaneously by importing existing charts directly ## Next steps > [!div class="nextstepaction"] -> [Create a workspace in Azure Managed Grafana Preview using the Azure portal](./quickstart-managed-grafana-portal.md). +> [Create a workspace in Azure Managed Grafana Preview using the Azure portal](./quickstart-managed-grafana-portal.md). 
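The Grafana API how-to linked above (`how-to-api-calls.md`, updated earlier in this diff) retrieves an Azure AD access token and then calls the workspace's Grafana endpoint with it. The sketch below shows one such call in Python; the endpoint URL and token are placeholders read from environment variables, and `/api/user` is the standard Grafana HTTP API route for the signed-in user. Adjust both for your workspace and scenario.

```python
# Hedged sketch: call the Grafana HTTP API of an Azure Managed Grafana workspace
# using a bearer token obtained as described in the how-to-api-calls article.
# GRAFANA_ENDPOINT and GRAFANA_ACCESS_TOKEN are placeholders you set yourself.
import os

import requests

endpoint = os.environ["GRAFANA_ENDPOINT"]    # for example, the workspace endpoint shown in the Azure portal
token = os.environ["GRAFANA_ACCESS_TOKEN"]   # the access token retrieved in the article's earlier step

response = requests.get(
    f"{endpoint}/api/user",                  # standard Grafana API route for the signed-in user
    headers={"Authorization": f"Bearer {token}"},
    timeout=30,
)
response.raise_for_status()
print(response.json())
```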
\ No newline at end of file diff --git a/articles/managed-instance-apache-cassandra/visualize-prometheus-grafana.md b/articles/managed-instance-apache-cassandra/visualize-prometheus-grafana.md index 1971d60be455..e1560ec3e378 100644 --- a/articles/managed-instance-apache-cassandra/visualize-prometheus-grafana.md +++ b/articles/managed-instance-apache-cassandra/visualize-prometheus-grafana.md @@ -19,7 +19,7 @@ The following tasks are required to visualize metrics: * Install the [Prometheus Dashboards](https://github.com/datastax/metric-collector-for-apache-cassandra#installing-the-prometheus-dashboards) onto the VM. >[!WARNING] -> Prometheus and Grafana are open-source software and not supported as part of the Azure Managed Instance for Apache Cassandra service. Visualizing metrics in the way described below will require you to host and maintain a virtual machine as the server for both Prometheus and Grafana. The instructions below were tested only for Ubuntu Server 18.04, there is no guarantee that they will work with other linux distributions. Following this approach will entail supporting any issues that may arise, such as running out of space, or availability of the server. For a fully supported and hosted metrics experience, consider using [Azure Monitor metrics](monitor-clusters.md#azure-metrics), or alternatively [Azure Monitor partner integrations](/azure/azure-monitor/partners). +> Prometheus and Grafana are open-source software and not supported as part of the Azure Managed Instance for Apache Cassandra service. Visualizing metrics in the way described below will require you to host and maintain a virtual machine as the server for both Prometheus and Grafana. The instructions below were tested only for Ubuntu Server 18.04, there is no guarantee that they will work with other linux distributions. Following this approach will entail supporting any issues that may arise, such as running out of space, or availability of the server. For a fully supported and hosted metrics experience, consider using [Azure Monitor metrics](monitor-clusters.md#azure-metrics), or alternatively [Azure Monitor partner integrations](../azure-monitor/partners.md). ## Deploy an Ubuntu server @@ -146,4 +146,4 @@ The following tasks are required to visualize metrics: In this article, you learned how to configure dashboards to visualize metrics in Prometheus using Grafana. Learn more about Azure Managed Instance for Apache Cassandra with the following articles: * [Overview of Azure Managed Instance for Apache Cassandra](introduction.md) -* [Deploy a Managed Apache Spark Cluster with Azure Databricks](deploy-cluster-databricks.md) +* [Deploy a Managed Apache Spark Cluster with Azure Databricks](deploy-cluster-databricks.md) \ No newline at end of file diff --git a/articles/marketplace/azure-private-plan-troubleshooting.md b/articles/marketplace/azure-private-plan-troubleshooting.md index 3eba2d1c028c..6f84be4756d2 100644 --- a/articles/marketplace/azure-private-plan-troubleshooting.md +++ b/articles/marketplace/azure-private-plan-troubleshooting.md @@ -63,7 +63,7 @@ While troubleshooting the Azure Subscription Hierarchy, keep these things in min ## Troubleshooting Checklist -- ISV to ensure the SaaS private plan is using the correct tenant ID for the customer - [How to find your Azure Active Directory tenant ID](../active-directory/fundamentals/active-directory-how-to-find-tenant.md). 
For VMs use the [Azure Subscription ID.](/azure/azure-portal/get-subscription-tenant-id) +- ISV to ensure the SaaS private plan is using the correct tenant ID for the customer - [How to find your Azure Active Directory tenant ID](../active-directory/fundamentals/active-directory-how-to-find-tenant.md). For VMs use the [Azure Subscription ID.](../azure-portal/get-subscription-tenant-id.md) - ISV to ensure that the Customer is not buying through a CSP. Private Plans are not available on a CSP-managed subscription. - Customer to ensure customer is logging in with an email ID that is registered under the same tenant ID (use the same user ID they used in step #1 above) - ISV to ask the customer to find the Private Plan in Azure Marketplace: [Private plans in Azure Marketplace](/marketplace/private-plans) @@ -81,4 +81,4 @@ While troubleshooting the Azure Subscription Hierarchy, keep these things in min ## Next steps -- [Create an Azure Support Request](../azure-portal/supportability/how-to-create-azure-support-request.md) +- [Create an Azure Support Request](../azure-portal/supportability/how-to-create-azure-support-request.md) \ No newline at end of file diff --git a/articles/marketplace/azure-vm-faq.yml b/articles/marketplace/azure-vm-faq.yml index a8f9a756d1e9..034a5c25d470 100644 --- a/articles/marketplace/azure-vm-faq.yml +++ b/articles/marketplace/azure-vm-faq.yml @@ -478,11 +478,11 @@ sections: answer: | You can deploy hidden preview images using quickstart templates. To deploy a preview image, - 1. Goto the respective quick-start template for [Linux](https://github.com/Azure/azure-quickstart-templates/tree/master/quickstarts/microsoft.compute/vm-simple-linux/) or [Windows](https://github.com/Azure/azure-quickstart-templates/tree/master/quickstarts/microsoft.compute/vm-simple-windows), select "Deploy to Azure". This should take you to Azure portal. + 1. Go to the respective quick-start template for [Linux](https://github.com/Azure/azure-quickstart-templates/tree/master/quickstarts/microsoft.compute/vm-simple-linux/) or [Windows](https://github.com/Azure/azure-quickstart-templates/tree/master/quickstarts/microsoft.compute/vm-simple-windows), select "Deploy to Azure". This should take you to Azure portal. 2. In Azure portal, select "Edit template". 3. In the JSON template, search for imageReference and update the publisherid, offerid, skuid, and version of the image. To test preview image, append "-PREVIEW" to the offerid. ![image](https://user-images.githubusercontent.com/79274470/110191995-71c7d500-7de0-11eb-9f3c-6a42f55d8f03.png) - 4. Click Save + 4. Select **Save**. 5. Fill out the rest of the details. Review and Create diff --git a/articles/marketplace/azure-vm-plan-technical-configuration.md b/articles/marketplace/azure-vm-plan-technical-configuration.md index ef089a527d6b..7b7dfbbfe493 100644 --- a/articles/marketplace/azure-vm-plan-technical-configuration.md +++ b/articles/marketplace/azure-vm-plan-technical-configuration.md @@ -23,7 +23,7 @@ Some common reasons for reusing the technical configuration settings from anothe - Your solution behaves differently based on the plan the user chooses to deploy. For example, the software is the same, but features vary by plan. > [!NOTE] -> If you would like to use a public plan to create a private plan with a different price, consider creating a private offer instead of reusing the technical configuration. Learn more about [the difference between private plans and private offers](/azure/marketplace/isv-customer-faq). 
Learn more about [how to create a private offer](/azure/marketplace/isv-customer). +> If you would like to use a public plan to create a private plan with a different price, consider creating a private offer instead of reusing the technical configuration. Learn more about [the difference between private plans and private offers](./isv-customer-faq.yml). Learn more about [how to create a private offer](./isv-customer.md). Leverage [Azure Instance Metadata Service](../virtual-machines/windows/instance-metadata-service.md) (IMDS) to identify which plan your solution is deployed within to validate license or enabling of appropriate features. @@ -55,23 +55,23 @@ Here is a list of properties that can be selected for your VM. Enable the proper - Python version above 2.6+ - For more information, see [VM Extension](/azure/marketplace/azure-vm-certification-faq). + For more information, see [VM Extension](./azure-vm-certification-faq.yml). -- **Supports backup**: Enable this property if your images support Azure VM backup. Learn more about [Azure VM backup](/azure/backup/backup-azure-vms-introduction). +- **Supports backup**: Enable this property if your images support Azure VM backup. Learn more about [Azure VM backup](../backup/backup-azure-vms-introduction.md). -- **Supports accelerated networking**: The VM images in this plan support single root I/O virtualization (SR-IOV) to a VM, enabling low latency and high throughput on the network interface. Learn more about [accelerated networking for Linux](/azure/virtual-network/create-vm-accelerated-networking-cli). Learn more about [accelerated networking for Windows](/azure/virtual-network/create-vm-accelerated-networking-powershell). +- **Supports accelerated networking**: The VM images in this plan support single root I/O virtualization (SR-IOV) to a VM, enabling low latency and high throughput on the network interface. Learn more about [accelerated networking for Linux](../virtual-network/create-vm-accelerated-networking-cli.md). Learn more about [accelerated networking for Windows](../virtual-network/create-vm-accelerated-networking-powershell.md). - **Is a network virtual appliance**: A network virtual appliance is a product that performs one or more network functions, such as a Load Balancer, VPN Gateway, Firewall or Application Gateway. Learn more about [network virtual appliances](https://go.microsoft.com/fwlink/?linkid=2155373). - **Supports NVMe** - Enable this property if the images in this plan support NVMe disk interface. The NVMe interface offers higher and consistent IOPS and bandwidth relative to legacy SCSI interface. -- **Supports cloud-init configuration**: Enable this property if the images in this plan support cloud-init post deployment scripts. Learn more about [cloud-init configuration](/azure/virtual-machines/linux/using-cloud-init). +- **Supports cloud-init configuration**: Enable this property if the images in this plan support cloud-init post deployment scripts. Learn more about [cloud-init configuration](../virtual-machines/linux/using-cloud-init.md). - **Supports hibernation** – The images in this plan support hibernation/resume. - **Remote desktop/SSH not supported**: Enable this property if any of the following conditions are true: - - Virtual machines deployed with these images don't allow customers to access it using Remote Desktop or SSH. Learn more about [locked VM images](/azure/marketplace/azure-vm-certification-faq#locked-down-or-ssh-disabled-offer.md). 
Images that are published with either SSH disabled (for Linux) or RDP disabled (for Windows) are treated as Locked down VMs. There are special business scenarios to restrict access to users. During validation checks, Locked down VMs might not allow execution of certain certification commands. + - Virtual machines deployed with these images don't allow customers to access it using Remote Desktop or SSH. Learn more about [locked VM images](./azure-vm-certification-faq.yml#locked-down-or-ssh-disabled-offer). Images that are published with either SSH disabled (for Linux) or RDP disabled (for Windows) are treated as Locked down VMs. There are special business scenarios to restrict access to users. During validation checks, Locked down VMs might not allow execution of certain certification commands. - Image does not support sampleuser while deploying. - Image has limited access. @@ -90,12 +90,12 @@ Below are examples (non-exhaustive) that might require custom templates for depl ## Image types -Generations of a virtual machine defines the virtual hardware it uses. Based on your customer’s needs, you can publish a Generation 1 VM, Generation 2 VM, or both. To learn more about the differences between Generation 1 and Generation 2 capabilities, see [Support for generation 2 VMs on Azure](/azure/virtual-machines/generation-2). +Generations of a virtual machine defines the virtual hardware it uses. Based on your customer’s needs, you can publish a Generation 1 VM, Generation 2 VM, or both. To learn more about the differences between Generation 1 and Generation 2 capabilities, see [Support for generation 2 VMs on Azure](../virtual-machines/generation-2.md). When creating a new plan, select an Image type from the drop-down menu. You can choose either X64 Gen 1 or X64 Gen 2. To add another image type to a plan, select **+Add image type**. You will need to provide a SKU ID for each new image type that is added. > [!NOTE] -> A published generation requires at least one image version to remain available for customers. To remove the entire plan (along with all its generations and images), select **Deprecate plan** on the **Plan Overview** page. Learn more about [deprecating plans](/azure/marketplace/deprecate-vm). +> A published generation requires at least one image version to remain available for customers. To remove the entire plan (along with all its generations and images), select **Deprecate plan** on the **Plan Overview** page. Learn more about [deprecating plans](./deprecate-vm.md). > ## VM images @@ -105,7 +105,7 @@ To add a new image version, click **+Add VM image**. This will open a panel in w Keep in mind the following when publishing VM images: 1. Provide only one new VM image per image type in a given submission. -2. After an image has been published, you can't edit it, but you can deprecate it. Deprecating a version prevents both new and existing users from deploying a new instance of the deprecated version. Learn more about [deprecating VM images](/azure/marketplace/deprecate-vm). +2. After an image has been published, you can't edit it, but you can deprecate it. Deprecating a version prevents both new and existing users from deploying a new instance of the deprecated version. Learn more about [deprecating VM images](./deprecate-vm.md). 3. You can add up to 16 data disks for each VM image provided. Regardless of which operating system you use, add only the minimum number of data disks that the solution requires. 
During deployment, customers can’t remove disks that are part of an image, but they can always add disks during or after deployment. > [!NOTE] diff --git a/articles/marketplace/dynamics-365-customer-engage-availability.md b/articles/marketplace/dynamics-365-customer-engage-availability.md index 52fb77338639..7689ddaed208 100644 --- a/articles/marketplace/dynamics-365-customer-engage-availability.md +++ b/articles/marketplace/dynamics-365-customer-engage-availability.md @@ -6,7 +6,7 @@ ms.subservice: partnercenter-marketplace-publisher ms.topic: how-to author: vamahtan ms.author: vamahtan -ms.date: 12/03/2021 +ms.date: 05/25/2022 --- # Configure Dynamics 365 apps on Dataverse and Power Apps offer availability @@ -17,6 +17,9 @@ This page lets you define where and how to make your offer available, including To specify the markets in which your offer should be available, select **Edit markets**. +> [!NOTE] +> If you choose to sell through Microsoft and have Microsoft host transactions on your behalf, then the **Markets** section is not available on this page. In this case, you’ll configure the markets later when you create plans for the offer. If the **Markets** section isn’t shown, go to [Preview audience](#preview-audience). + On the **Market selection** popup window, select at least one market. Choose **Select all** to make your offer available in every possible market or select only the specific markets you want. When you're finished, select **Save**. Your selections here apply only to new acquisitions; if someone already has your app in a certain market, and you later remove that market, the people who already have the offer in that market can continue to use it, but no new customers in that market will be able to get your offer. diff --git a/articles/marketplace/dynamics-365-customer-engage-offer-setup.md b/articles/marketplace/dynamics-365-customer-engage-offer-setup.md index c4190a741116..290ac2960027 100644 --- a/articles/marketplace/dynamics-365-customer-engage-offer-setup.md +++ b/articles/marketplace/dynamics-365-customer-engage-offer-setup.md @@ -6,14 +6,12 @@ ms.subservice: partnercenter-marketplace-publisher ms.topic: how-to author: vamahtan ms.author: vamahtan -ms.date: 04/18/2022 +ms.date: 05/25/2022 --- # Create a Dynamics 365 apps on Dataverse and Power Apps offer -This article describes how to create a Dynamics 365 apps on Dataverse and Power Apps offer. All offers for Dynamics 365 go through our certification process. The trial experience allows users to deploy your solution to a live Dynamics 365 environment. - -Before you start, create a commercial marketplace account in [Partner Center](./create-account.md) and ensure it is enrolled in the commercial marketplace program. +This article describes how to create a _Dynamics 365 apps on Dataverse and Power Apps_ offer. Before you start, create a commercial marketplace account in [Partner Center](./create-account.md) and ensure it is enrolled in the commercial marketplace program. ## Before you begin @@ -59,20 +57,30 @@ Enter a descriptive name that we'll use to refer to this offer solely within Par ## Setup details -For **How do you want potential customers to interact with this listing offer?**, select the option you want to use for this offer: +1. On the _Offer setup_ page, choose one of the following options: -- **Enable app license management through Microsoft** – Manage your app licenses through Microsoft. 
To let customers run your app’s base functionality without a license and run premium features after they’ve purchased a license, select the **Allow customers to install my app even if licenses are not assigned box**. If you select this second box, you need to configure your solution package to not require a license. + - Select **Yes** to sell through Microsoft and have Microsoft host transactions on your behalf. + + If you choose this option, the Enable app license management through Microsoft check box is enabled and cannot be changed. - > [!NOTE] - > You cannot change this setting after you publish your offer. To learn more about this setting, see [ISV app license management](isv-app-license.md). + > [!NOTE] + > This capability is currently in Public Preview. -- **Get it now (free)** – List your offer to customers for free. -- **Free trial (listing)** – List your offer to customers with a link to a free trial. Offer listing free trials are created, managed, and configured by your service and do not have subscriptions managed by Microsoft. + - Select **No**, if you prefer to only list your offer through the marketplace and process transactions independently. - > [!NOTE] - > The tokens your application will receive through your trial link can only be used to obtain user information through Azure Active Directory (Azure AD) to automate account creation in your app. Microsoft accounts are not supported for authentication using this token. + If you choose this option, you can use the **Enable app license management through Microsoft** check box to choose whether or not to enable app license management through Microsoft. For more information, see [ISV app license management](isv-app-license.md). + +1. To let customers run your app’s base functionality without a license and run premium features after they’ve purchased a license, select the **Allow customers to install my app even if licenses are not assigned** box. If you select this second box, you need to configure your solution package to not require a license. + +1. If you chose **No** in step 1 and chose not to enable app license management through Microsoft, then you can select one of the following: + + - **Get it now (free)** – List your offer to customers for free. + - **Free trial (listing)** – List your offer to customers with a link to a free trial. The trial experience lets users deploy your solution to a live Dynamics 365 environment. Offer listing free trials are created, managed, and configured by your service and do not have subscriptions managed by Microsoft. + + > [!NOTE] + > The tokens your application will receive through your trial link can only be used to obtain user information through Azure Active Directory (Azure AD) to automate account creation in your app. Microsoft accounts are not supported for authentication using this token. -- **Contact me** – Collect customer contact information by connecting your Customer Relationship Management (CRM) system. The customer will be asked for permission to share their information. These customer details, along with the offer name, ID, and marketplace source where they found your offer, will be sent to the CRM system that you've configured. For more information about configuring your CRM, see [Customer leads](#customer-leads). + - **Contact me** – Collect customer contact information by connecting your Customer Relationship Management (CRM) system. The customer will be asked for permission to share their information. 
These customer details, along with the offer name, ID, and marketplace source where they found your offer, will be sent to the CRM system that you've configured. For more information about configuring your CRM, see [Customer leads](#customer-leads). ## Test drive diff --git a/articles/marketplace/dynamics-365-customer-engage-plans.md b/articles/marketplace/dynamics-365-customer-engage-plans.md index 0dab8e9dfaf0..dffba7935091 100644 --- a/articles/marketplace/dynamics-365-customer-engage-plans.md +++ b/articles/marketplace/dynamics-365-customer-engage-plans.md @@ -6,7 +6,7 @@ ms.subservice: partnercenter-marketplace-publisher ms.topic: how-to author: vamahtan ms.author: vamahtan -ms.date: 12/03/2021 +ms.date: 05/25/2022 --- # Create Dynamics 365 apps on Dataverse and Power Apps plans @@ -15,12 +15,12 @@ If you enabled app license management for your offer, the **Plans overview** tab [ ![Screenshot of the Plan overview tab for a Dynamics 365 apps on Dataverse and Power Apps offer that's been enabled for third-party app licensing.](./media/third-party-license/plan-tab-d365-workspaces.png) ](./media/third-party-license/plan-tab-d365-workspaces.png#lightbox) -You need to define at least one plan, if your offer has app license management enabled. You can create a variety of plans with different options for the same offer. These plans (sometimes referred to as SKUs) can differ in terms of monetization or tiers of service. Later, you will map the Service IDs of these plans in your solution package to enable a runtime license check by the Dynamics platform against these plans. You will map the Service ID of each plan in your solution package. This enables the Dynamics platform to run a license check against these plans. +You need to define at least one plan, if your offer has app license management enabled. You can create a variety of plans with different options for the same offer. These plans (sometimes referred to as SKUs) can differ in terms of monetization or tiers of service. Later, you will map the Service IDs of each plan in the metadata of your solution package to enable a runtime license check by the Dynamics platform against these plans (we'll walk you through this process later). You will map the Service ID of each plan in your solution package. ## Create a plan 1. In the left-nav, select **Plan overview**. -1. Near the top of the **Plan overview** page, select **+ Create new plan**. +1. Near the top of the page, select **+ Create new plan**. 1. In the dialog box that appears, in the **Plan ID** box, enter a unique plan ID. Use up to 50 lowercase alphanumeric characters, dashes, or underscores. You cannot modify the plan ID after you select **Create**. 1. In the **Plan name** box, enter a unique name for this plan. Use a maximum of 200 characters. 1. Select **Create**. @@ -31,24 +31,94 @@ On the **Plan listing** tab, you can define the plan name and description as you 1. In the **Plan name** box, the name you provided earlier for this plan appears here. You can change it at any time. This name will appear in the commercial marketplace as the title of your offer's software plan. 1. In the **Plan description** box, explain what makes this software plan unique and any differences from other plans within your offer. This description may contain up to 3,000 characters. -1. Select **Save draft**, and then in the breadcrumb at the top of the page, select **Plans**. +1. Select **Save draft**. 
- [ ![Screenshot shows the Plan overview link on the Plan listing page of an offer in Partner Center.](./media/third-party-license/bronze-plan-workspaces.png) ](./media/third-party-license/bronze-plan-workspaces.png#lightbox) +## Define pricing and availability -1. To create another plan for this offer, at the top of the **Plan overview** page, select **+ Create new plan**. Then repeat the steps in the [Create a plan](#create-a-plan) section. Otherwise, if you're done creating plans, go to the next section: Copy the Service IDs. +If you chose to sell through Microsoft and have Microsoft host transactions on your behalf, then the **Pricing and availability** tab appears in the left-nav. Otherwise, go to [Copy the Service IDs](#copy-the-service-ids). + +1. In the left-nav, select **Pricing and availability**. +1. In the **Markets** section, select **Edit markets**. +1. On the side panel that appears, select at least one market. To make your offer available in every possible market, choose **Select all** or select only the specific markets you want. When you're finished, select **Save**. + + Your selections here apply only to new acquisitions; if someone already has your app in a certain market, and you later remove that market, the people who already have the offer in that market can continue to use it, but no new customers in that market will be able to get your offer. + + > [!IMPORTANT] + > It is your responsibility to meet any local legal requirements, even if those requirements aren't listed here or in Partner Center. Even if you select all markets, local laws, restrictions, or other factors may prevent certain offers from being listed in some countries and regions. + +### Configure per user pricing + +1. On the **Pricing and availability** tab, under **User limits**, optionally specify the minimum and maximum number of users for this plan. + > [!NOTE] + > If you choose not to define the user limits, the default value of one to one million users will be used. +1. Under **Billing term**, specify a monthly price, annual price, or both. + + > [!NOTE] + > You must specify a price for your offer, even if the price is zero. + +### Enable a free trial + +You can optionally configure a free trial for each plan in your offer. To enable a free trial, select the **Allow a one-month free trial** check box. + +> [!IMPORTANT] +> After your transactable offer has been published with a free trial, it cannot be disabled for that plan. Make sure this setting is correct before you publish the offer to avoid having to re-create the plan. + +If you select this option, customers are not charged for the first month of use. At the end of the free month, one of the following occurs: +- If the customer chose recurring billing, they will automatically be upgraded to a paid plan and the selected payment method is charged. +- If the customer didn’t choose recurring billing, the plan will expire at the end of the free trial. + +### Choose who can see your plan + +You can configure each plan to be visible to everyone or to only a specific audience. You grant access to a private plan using tenant IDs with the option to include a description of each tenant ID you assign. You can add a maximum of 10 tenant IDs manually or up to 20,000 tenant IDs using a .CSV file. A private plan is not the same as a preview audience. + +> [!NOTE] +> If you publish a private plan, you can change its visibility to public later. However, once you publish a public plan, you cannot change its visibility to private.
+ +#### Make your plan public + +1. Under **Plan visibility**, select **Public**. +1. Select **Save draft**, and then go to [View your plans](#view-your-plans). + +#### Manually add tenant IDs for a private plan + +1. Under **Plan visibility**, select **Private**. +1. In the **Tenant ID** box that appears, enter the Azure AD tenant ID of the audience you want to grant access to this private plan. A minimum of one tenant ID is required. +1. (Optional) Enter a description of this audience in the **Description** box. +1. To add another tenant ID, select **Add ID**, and then repeat steps 2 and 3. +1. When you're done adding tenant IDs, select **Save draft**, and then go to [View your plans](#view-your-plans). + +#### Use a .CSV file for a private plan + +1. Under **Plan visibility**, select **Private**. +1. Select the **Export Audience (csv)** link. +1. Open the .CSV file and add the Azure IDs you want to grant access to the private plan to the **ID** column. +1. (Optional) Enter a description for each audience in the **Description** column. +1. Add "TenantID" in the **Type** column for each row with an Azure ID. +1. Save the .CSV file. +1. On the **Pricing and availability** tab, under **Plan visibility**, select the **Import Audience (csv)** link. +1. In the dialog box that appears, select **Yes**. +1. Select the .CSV file and then select **Open**. +1. Select **Save draft**, and then go to the next section: View your plans. + +### View your plans + +1. In the breadcrumb at the top of the page, select **Plan overview**. +1. To create another plan for this offer, at the top of the **Plan overview** page, repeat the steps in the [Create a plan](#create-a-plan) section. Otherwise, if you're done creating plans, go to the next section: Copy the Service IDs. ## Copy the Service IDs You need to copy the Service ID of each plan you created so you can map them to your solution package in the next section: Add Service IDs to your solution package. -- For each plan you created, copy the Service ID to a safe place. You’ll add them to your solution package in the next step. The service ID is listed on the **Plan overview** page in the form of `ISV name.offer name.plan ID`. For example, Fabrikam.F365.bronze. +1. To go to the **Plan overview** page, in the breadcrumb at the top of the page, select **Plan overview**. If you don’t see the breadcrumb, select **Plan overview** in the left-nav. + +1. For each plan you created, copy the Service ID to a safe place. You’ll add them to your solution package in the next section. The service ID is listed on the **Plan overview** page in the form of `ISV name.offer name.plan ID`. For example, fabrikam.f365.bronze. [ ![Screenshot of the Plan overview page. The service ID for the plan is highlighted.](./media/third-party-license/service-id-workspaces.png) ](./media/third-party-license/service-id-workspaces.png#lightbox) ## Add Service IDs to your solution package -1. Add the Service IDs you copied in the previous step to your solution package. To learn how, see [Add licensing information to your solution](/powerapps/developer/data-platform/appendix-add-license-information-to-your-solution) and [Create an AppSource package for your app](/powerapps/developer/data-platform/create-package-app-appsource). -1. After you create the CRM package .zip file, upload it to Azure Blob Storage. You will need to provide the SAS URL of the Azure Blob Storage account that contains the uploaded CRM package .zip file. +1.
Add the Service IDs you copied in the previous step to the metadata of your solution package. To learn how, see [Add licensing information to your solution](/powerapps/developer/data-platform/appendix-add-license-information-to-your-solution) and [Create an AppSource package for your app](/powerapps/developer/data-platform/create-package-app-appsource). +1. After you create the CRM package .zip file, upload it to [Azure Blob Storage](/power-apps/developer/data-platform/store-appsource-package-azure-storage). You will need to provide the SAS URL of the Azure Blob Storage account that contains the uploaded CRM package .zip file, when configuring the technical configuration. ## Next steps diff --git a/articles/marketplace/dynamics-365-review-publish.md b/articles/marketplace/dynamics-365-review-publish.md index 69000354b1d9..ef5cc0e26062 100644 --- a/articles/marketplace/dynamics-365-review-publish.md +++ b/articles/marketplace/dynamics-365-review-publish.md @@ -7,53 +7,81 @@ ms.subservice: partnercenter-marketplace-publisher ms.topic: how-to author: vamahtan ms.author: vamahtan -ms.date: 09/27/2021 +ms.date: 05/25/2022 --- # Review and publish a Dynamics 365 offer -This article shows you how to use Partner Center to preview your draft Dynamics 365 offer and then publish it to the commercial marketplace. It also covers how to check publishing status as it proceeds through the publishing steps. +This article shows you how to use Partner Center to submit your Dynamics 365 offer for publishing, preview your offer, subscribe to a plan, and then publish it live to the commercial marketplace. It also covers how to check the publishing status as it proceeds through the publishing steps. You must have already created the offer that you want to publish. -## Offer status +## Submit your offer to publishing -You can review your offer status on the **Overview** tab of the commercial marketplace dashboard in [Partner Center](https://partner.microsoft.com/dashboard/commercial-marketplace/overview). The **Status** of each offer will be one of the following: +1. Return to [Partner Center](https://go.microsoft.com/fwlink/?linkid=2166002). +1. On the Home page, select the **Marketplace offers** tile. +1. In the **Offer alias** column, select the offer you want to publish. +1. In the upper-right corner of the portal, select **Review and publish**. +1. Make sure that the **Status column** for each page for the offer says **Complete**. The three possible statuses are as follows: + + - **Not started** – The page is incomplete. + - **Incomplete** – The page is missing required information or has errors that need to be fixed. You'll need to go back to the page and update it. + - **Complete** – The page is complete. All required data has been provided and there are no errors. + +1. If any of the pages have a status other than **Complete**, select the page name, correct the issue, save the page, and then select **Review and publish** again to return to this page. +1. Some offer types require testing. After all of the pages are complete, if you see a **Notes for certification** box, provide testing instructions to the certification team to ensure that your app is tested correctly. Provide any supplementary notes helpful for understanding your app. +1. To start the publishing process for your offer, select **Publish**. The **Offer overview** page appears and shows the offer's **Publish status**. + +## Publish status + +Your offer's publish status will change as it moves through the publication process. 
You can review your offer status on the **Overview** tab of the commercial marketplace offer in [Partner Center](https://partner.microsoft.com/dashboard/commercial-marketplace/overview). The **Status** of each offer will be one of the following: | Status | Description | -| ------------ | ------------- | +| ------------ | ------------ | | Draft | Offer has been created but it isn't being published. | | Publish in progress | Offer is working its way through the publishing process. | | Attention needed | We discovered a critical issue during certification or during another publishing phase. | | Preview | We certified the offer, which now awaits a final verification by the publisher. Select **Go live** to publish the offer live. | | Live | Offer is live in the marketplace and can be seen and acquired by customers. | -| Pending stop sell | Publisher selected "stop sell" on an offer or plan, but the action has not yet been completed. | +| Pending stop distribution | Publisher selected "stop distribution" on an offer or plan, but the action has not yet been completed. | | Not available in the marketplace | A previously published offer in the marketplace has been removed. | -## Validation and publishing steps +## Preview and subscribe to the offer -Your offer's publish status will change as it moves through the publication process. For detailed information on this process, see [Validation and publishing steps](review-publish-offer.md#validation-and-publishing-steps). +When the offer is ready for you to test in the preview environment, we’ll send you an email to request that you review and approve your offer preview. You can also refresh the **Offer overview** page in your browser to see if your offer has reached the Publisher sign-off phase. If it has, the **Go live** button and preview link will be available. If you chose to sell your offer through Microsoft, anyone who has been added to the preview audience can test the acquisition and deployment of your offer to ensure it meets your requirements during this stage. -When you are ready to submit an offer for publishing, select **Review and publish** at the upper-right corner of the portal. You'll see the status of each page for your offer listed as one of the following: +The following screenshot shows the **Offer overview** page for a _Dynamics 365 apps on Dataverse and Power apps_ offer, with a preview link under the **Go live** button. The validation steps you’ll see on this page vary depending on the selections you made when you created the offer. -- **Not started** – The page is incomplete. -- **Incomplete** – The page is missing required information or has errors that need to be fixed. You'll need to go back to the page and update it. -- **Complete** – The page is complete. All required data has been provided and there are no errors. +- To preview your offer, select the _preview link_ under the **Go live** button. This takes you to the product details page on AppSource, where you can validate that all the details of the offer are showing correctly. -If any of the pages have a status other than **Complete**, you need to correct the issue on that page and then return to the **Review and publish** page to confirm the status now shows as **Complete**. Some offer types require testing. If so, you will see a **Notes for certification** field where you need to provide testing instructions to the certification team and any supplementary notes helpful for understanding your app. 
+ [ ![Illustrates the preview link on the Offer overview page.](./media/dynamics-365/preview-link.png) ](./media/dynamics-365/preview-link.png#lightbox) -After all pages are complete and you have entered applicable testing notes, select **Publish** to submit your offer. We will email you when a preview version of your offer is available to approve. At that time complete the following steps: +> [!IMPORTANT] +> To validate the end-to-end purchase and setup flow, purchase your offer while it is in Preview. First notify Microsoft with a support ticket to ensure we are aware that you're testing the offer. Otherwise, the customer account used for the purchase will be billed and invoiced. Publisher Payout will occur when the criteria are met and will be paid out per the payout schedule with the agency fee deducted from the purchase price. -1. Return to [Partner Center](https://go.microsoft.com/fwlink/?linkid=2166002). -1. On the Home page, select the **Marketplace offers** tile. +If your offer is a _Contact Me_ listing, test that a lead is created as expected by providing the Contact Me details during preview. + +## Test the offer in AppSource + +1. From the _Product details_ page of the offer, select the **Buy Now** button. +1. Select the plan you want to purchase and then select **Next**. +1. Select the billing term, recurring billing term, and number of users. +1. On the Payment page, enter the sold-to address and payment method. +1. To place the order, select the **Place order** button. +1. Once the order is placed, you can select the **Assign licenses** button to go to the [Microsoft 365 admin center](https://admin.microsoft.com/) to assign licenses to users. + +## Go live - [ ![Illustrates the Marketplace offers tile on the Partner Center Home page.](./media/workspaces/partner-center-home.png) ](./media/workspaces/partner-center-home.png#lightbox) +After you complete your tests, you can publish the offer live to the commercial marketplace. +1. Return to [Partner Center](https://go.microsoft.com/fwlink/?linkid=2166002). +1. On the Home page, select the **Marketplace offers** tile. 1. On the Marketplace offers page, select the offer. -1. Select **Review and publish**. 1. Select **Go live** to make your offer publicly available. -After you select **Review and publish**, we will perform certification and other verification processes before your offer is published to AppSource. We will notify you when your offer is available in preview so you can go live. If there is an issue, we will notify you with the details and provide guidance on how to fix it. +All offers for Dynamics 365 go through our certification process. Now that you’ve chosen to make your offer available in the commercial marketplace, we will perform certification and other verification processes before your offer is published to AppSource. If there is an issue, we will notify you with the details and provide guidance on how to fix it. + +After these validation checks are complete, your offer will be live in the marketplace. ## Next steps -- If you enabled _Third-party app license management through Microsoft_ for your offer, after you sell your offer, you’ll need to register the deal in Partner Center. To learn more, see [Managing licensing in marketplace offers](/partner-center/csp-commercial-marketplace-licensing). +- If you enabled _Third-party app license management through Microsoft_ for your offer, after you sell your offer, you’ll need to register the deal in Partner Center. 
To learn more, see [Register deals you've won in Partner Center](/partner-center/register-deals). - [Update an existing offer in the Commercial Marketplace](update-existing-offer.md) diff --git a/articles/marketplace/isv-app-license.md b/articles/marketplace/isv-app-license.md index b4c71136e2aa..44db5d41029b 100644 --- a/articles/marketplace/isv-app-license.md +++ b/articles/marketplace/isv-app-license.md @@ -7,7 +7,7 @@ ms.topic: conceptual author: mingshen-ms ms.author: mingshen ms.reviewer: dannyevers -ms.date: 12/03/2021 +ms.date: 05/25/2022 --- # ISV app license management @@ -16,15 +16,15 @@ Applies to the following offer type: - Dynamics 365 apps on Dataverse and Power Apps -_ISV app license management_ enables independent software vendors (ISVs) who build solutions using Dynamics 365 suite of products to manage and enforce licenses for their solutions using systems provided by Microsoft. By adopting this approach you can: +_ISV app license management_ enables independent software vendors (ISVs) who build solutions using Dynamics 365 suite of products to manage and enforce licenses for their solutions using systems provided by Microsoft. By adopting license management, ISVs can: -- Enable your customers to assign and unassign your solution’s licenses using familiar tools such as Microsoft 365 Admin Center, which they use to manage Office and Dynamics licenses. -- Have the Power Platform enforce your licenses at runtime to ensure that only licensed users can access your solution. +- Enable your customers to assign and unassign licenses of ISV products using familiar tools such as Microsoft 365 Admin Center, which customers use to manage Office and Dynamics licenses. +- Have the Power Platform enforce ISV product licenses at runtime to ensure that only licensed users can access your solution. - Save yourself the effort of building and maintaining your own license management and enforcement system. - -> [!NOTE] -> ISV app license management is only available to ISVs participating in the ISV Connect program. Microsoft is not involved in the sale of licenses. +ISV app license management currently supports: +- A named user license model. Each license must be assigned to an Azure AD user or Azure AD security group. +- [Enforcement for model-driven apps](/power-apps/maker/model-driven-apps/model-driven-app-overview). ## Prerequisites @@ -37,17 +37,49 @@ To manage your ISV app licenses, you need to comply with the following pre-requi ## High-level process -This table illustrates the high-level process to manage ISV app licenses: +The process varies depending on whether Microsoft hosts transactions on your behalf (also known as a _transactable offer_) or you only list the offer through the marketplace and host transactions independently. + +These steps illustrate the high-level process to manage ISV app licenses: + +### Step 1: Create an offer + +| Transactable offers | Licensable-only offers | +| ------------ | ------------- | +| The ISV [creates an offer in Partner Center](dynamics-365-customer-engage-offer-setup.md) and chooses to transact through Microsoft’s commerce system and enable Microsoft to manage the licenses of these add-ons. The ISV also defines at least one plan and configures pricing information and availability. The ISV can optionally define a private plan which only specific customers can see and purchase on [Microsoft AppSource](https://appsource.microsoft.com/). 
| The ISV [creates an offer in Partner Center](dynamics-365-customer-engage-offer-setup.md) and chooses to manage licenses for this offer through Microsoft. This includes defining one or more licensing plans for the offer. | + +### Step 2: Add license metadata to solution package + +The ISV creates a solution package for the offer that includes license plan information as metadata and uploads it to Partner Center for publication to Microsoft AppSource. To learn more, see [Adding license metadata to your solution](/powerapps/developer/data-platform/appendix-add-license-information-to-your-solution). + +### Step 3: Purchase subscription to ISV products + +| Transactable offers | Licensable-only offers | +| ------------ | ------------- | +| Customers discover the ISV’s offer in AppSource, purchase a subscription to the offer from AppSource, and get licenses for the ISV app. | - Customers discover the ISV’s offer in AppSource or directly on the ISV’s website. Customers purchase licenses for the plans they want directly from the ISV.
    - The ISV registers the purchase with Microsoft in Partner Center. As part of [deal registration](/partner-center/csp-commercial-marketplace-licensing#register-isv-connect-deal-in-deal-registration), the ISV will specify the type and quantity of each licensing plan purchased by the customer. | + +### Step 4: Manage subscription -| Step | Details | +| Transactable offers | Licensable-only offers | | ------------ | ------------- | -| Step 1: Create offer | The ISV creates an offer in Partner Center and chooses to manage licenses for this offer through Microsoft. This includes defining one or more licensing plans for the offer. For more information, see [Create a Dynamics 365 apps on Dataverse and Power Apps offer on Microsoft AppSource](dynamics-365-customer-engage-offer-setup.md). | -| Step 2: Update package | The ISV creates a solution package for the offer that includes license plan information as metadata, and uploads it to Partner Center for publication to Microsoft AppSource. To learn more, see [Adding license metadata to your solution](/powerapps/developer/data-platform/appendix-add-license-information-to-your-solution). | -| Step 3: Purchase licenses | Customers discover the ISV’s offer in AppSource or directly on the ISV’s website. Customers purchase licenses for the plans they want directly from the ISV (these offers are not purchasable through AppSource at this time). | -| Step 4: Register deal | The ISV registers the purchase with Microsoft in Partner Center. As part of [deal registration](/partner-center/csp-commercial-marketplace-licensing#register-isv-connect-deal-in-deal-registration), the ISV will specify the type and quantity of each licensing plan purchased by the customer. | -| Step 5: Manage licenses | The license plans will appear in Microsoft 365 Admin Center for the customer to [assign to users or groups](/microsoft-365/commerce/licenses/manage-third-party-app-licenses) in their organization. The customer can also install the application in their tenant via the Power Platform Admin Center. | -| Step 6: Perform license check | When a user within the customer’s organization tries to run an application, Microsoft checks to ensure that user has a license before permitting them to run it. If they don’t have a license, the user sees a message explaining that they need to contact an administrator for a license. | -| Step 7: View reports | ISVs can view information on provisioned and assigned licenses over a period of time and by geography. | +| Customers can manage subscriptions for the Apps they purchased in [Microsoft 365 admin center](https://admin.microsoft.com/), just like they normally do for any of their Microsoft Office or Dynamics subscriptions. | ISVs activate and manage deals in Partner Center ([deal registration portal(https://partner.microsoft.com/)]) | + +### Step 5: Assign licenses + +Customers can assign licenses of these add-ons in license pages under the billing node in [Microsoft 365 admin center](https://admin.microsoft.com/). Customers can assign licenses to users or groups. Doing so will enable these users to launch the ISV app. Customers can also install the app from [Microsoft 365 admin center](https://admin.microsoft.com/) into their Power Platform environment. + +**Licensable-only offers:** +- The license plans will appear in Microsoft 365 Admin Center for the customer to [assign to users or groups](/microsoft-365/commerce/licenses/manage-third-party-app-licenses) in their organization. 
The customer can also install the application in their tenant via the Power Platform Admin Center. + +### Step 6: Power Platform performs license checks + +When a user within the customer’s organization tries to run an application, Microsoft checks to ensure that the user has a license before permitting them to run it. If they do not have a license, the user sees a message explaining that they need to contact an administrator for a license. + +### Step 7: View reports + +ISVs can view information on: +- Orders purchased, renewed, or cancelled over time and by geography. + +- Provisioned and assigned licenses over a period of time and by geography. ## Enabling app license management through Microsoft @@ -60,9 +92,12 @@ Here’s how it works: - After you select the **Enable app license management through Microsoft** box, you can define licensing plans for your offer. - Customers will see a **Get it now** button on the offer listing page in AppSource. Customers can select this button to contact you to purchase licenses for the app. +> [!NOTE] +> This check box is automatically enabled if you choose to sell your offer through Microsoft and have Microsoft host transactions on your behalf. + ### Allow customers to install my app even if licenses are not assigned check box -After you select the first box, the **Allow customers to install my app even if licenses are not assigned** box appears. This option is useful if you are employing a “freemium” licensing strategy whereby you want to offer some basic features of your solution for free to all users and charge for premium features. Conversely, if you want to ensure that only tenants who currently own licenses for your product can download it from AppSource, then don’t select this option. +If you choose to list your offer through the marketplace and process transactions independently, after you select the first box, the **Allow customers to install my app even if licenses are not assigned** box appears. This option is useful if you are employing a “freemium” licensing strategy whereby you want to offer some basic features of your solution for free to all users and charge for premium features. Conversely, if you want to ensure that only tenants who currently own licenses for your product can download it from AppSource, then don’t select this option. > [!NOTE] > If you choose this option, you need to configure your solution package to not require a license. @@ -80,9 +115,7 @@ After your offer is published, the options you chose will drive which buttons ap :::image type="content" source="./media/third-party-license/f365.png" alt-text="Screenshot of an offer listing page on AppSource. 
The Get it now and Contact me buttons are shown."::: -***Figure 1: Offer listing page on Microsoft AppSource*** - ## Next steps - [Plan a Dynamics 365 offer](marketplace-dynamics-365.md) -- [How to create a Dynamics 365 apps on Dataverse and Power Apps offer](dynamics-365-customer-engage-offer-setup.md) +- [Create a Dynamics 365 apps on Dataverse and Power Apps offer](dynamics-365-customer-engage-offer-setup.md) diff --git a/articles/marketplace/marketplace-dynamics-365.md b/articles/marketplace/marketplace-dynamics-365.md index 8b47576d55b8..df7b73adf7a9 100644 --- a/articles/marketplace/marketplace-dynamics-365.md +++ b/articles/marketplace/marketplace-dynamics-365.md @@ -6,7 +6,7 @@ ms.subservice: partnercenter-marketplace-publisher ms.topic: conceptual author: vamahtan ms.author: vamahtan -ms.date: 04/13/2022 +ms.date: 05/25/2022 --- # Plan a Microsoft Dynamics 365 offer @@ -15,11 +15,11 @@ This article explains the different options and features of a Dynamics 365 offer Before you start, create a commercial marketplace account in [Partner Center](./create-account.md) and ensure it is enrolled in the commercial marketplace program. Also, review the [publishing process and guidelines](/office/dev/store/submit-to-appsource-via-partner-center). -## Licensing options +## Listing options -As you prepare to publish a new offer, you need to decide which licensing option to choose. This will determine what additional information you'll need to provide later as you create the offer in Partner Center. +As you prepare to publish a new offer, you need to decide which listing option to choose. This will determine what additional information you'll need to provide later as you create the offer in Partner Center. -These are the available licensing options for Dynamics 365 offer types: +These are the available listing options for the _Dynamics 365 apps on Dataverse and Power Apps_ offer type: | Offer type | Listing option | | --- | --- | @@ -31,12 +31,13 @@ These are the available licensing options for Dynamics 365 offer types: The following table describes the transaction process of each listing option. -| Licensing option | Transaction process | +| Listing option | Transaction process | | --- | --- | +| Transact with license management | You can choose to sell through Microsoft and have Microsoft host transactions on your behalf. For more information about this option, see [ISV app license management](isv-app-license.md).
    Currently available to the following offer type only:

    • Dynamics 365 apps on Dataverse and Power Apps
    | +| License management | Enables you to manage your ISV app licenses in Partner Center. For more information about this option, see [ISV app license management](isv-app-license.md).
    Currently available to the following offer type only:
    • Dynamics 365 apps on Dataverse and Power Apps
    | | Contact me | Collect customer contact information by connecting your Customer Relationship Management (CRM) system. The customer will be asked for permission to share their information. These customer details, along with the offer name, ID, and marketplace source where they found your offer, will be sent to the CRM system that you've configured. For more information about configuring your CRM, see the **Customer leads** section of your offer type's **Offer setup** page. | | Free trial (listing) | Offer your customers a one-, three- or six-month free trial. Offer listing free trials are created, managed, and configured by your service and do not have subscriptions managed by Microsoft. | | Get it now (free) | List your offer to customers for free. | -| Get it now | Enables you to manage your ISV app licenses in Partner Center.
    Currently available to the following offer type only:
    • Dynamics 365 apps on Dataverse and Power Apps

    For more information about this option, see [ISV app license management](isv-app-license.md). | ## Test drive diff --git a/articles/marketplace/media/dynamics-365/preview-link.png b/articles/marketplace/media/dynamics-365/preview-link.png new file mode 100644 index 000000000000..f3456f839a92 Binary files /dev/null and b/articles/marketplace/media/dynamics-365/preview-link.png differ diff --git a/articles/marketplace/media/third-party-license/service-id-workspaces.png b/articles/marketplace/media/third-party-license/service-id-workspaces.png index 7701457f4165..f31ceeeceed8 100644 Binary files a/articles/marketplace/media/third-party-license/service-id-workspaces.png and b/articles/marketplace/media/third-party-license/service-id-workspaces.png differ diff --git a/articles/marketplace/plan-azure-app-managed-app.md b/articles/marketplace/plan-azure-app-managed-app.md index 7c4193286c09..f74f26521c1c 100644 --- a/articles/marketplace/plan-azure-app-managed-app.md +++ b/articles/marketplace/plan-azure-app-managed-app.md @@ -7,7 +7,7 @@ ms.reviewer: dannyevers ms.service: marketplace ms.subservice: partnercenter-marketplace-publisher ms.topic: conceptual -ms.date: 11/02/2021 +ms.date: 05/25/2022 --- # Plan an Azure managed application for an Azure application offer @@ -28,7 +28,7 @@ Use an Azure Application: Managed application plan when the following conditions | An Azure subscription | Managed applications must be deployed to a customer's subscription, but they can be managed by a third party. | | Billing and metering | The resources are provided in a customer's Azure subscription. VMs that use the pay-as-you-go payment model are transacted with the customer via Microsoft and billed via the customer's Azure subscription.

    For bring-your-own-license VMs, Microsoft bills any infrastructure costs that are incurred in the customer subscription, but you transact software licensing fees with the customer directly. | | Azure-compatible virtual hard disk (VHD) | VMs must be built on Windows or Linux. For more information, see:
    * [Create an Azure VM technical asset](./azure-vm-certification-faq.yml#address-a-vulnerability-or-an-exploit-in-a-vm-offer) (for Windows VHDs).
    * [Linux distributions endorsed on Azure](../virtual-machines/linux/endorsed-distros.md) (for Linux VHDs). | -| Customer usage attribution | All new Azure application offers must also include an [Azure partner customer usage attribution](azure-partner-customer-usage-attribution.md) GUID. For more information about customer usage attribution and how to enable it, see [Azure partner customer usage attribution](azure-partner-customer-usage-attribution.md). | +| Customer usage attribution | For more information about customer usage attribution and how to enable it, see [Azure partner customer usage attribution](azure-partner-customer-usage-attribution.md). | | Deployment package | You'll need a deployment package that will let customers deploy your plan. If you create multiple plans that require the same technical configuration, you can use the same package. For details, see the next section: Deployment package. | > [!NOTE] @@ -70,8 +70,6 @@ Maximum file sizes supported are: - Up to 1 Gb in total compressed .zip archive size - Up to 1 Gb for any individual uncompressed file within the .zip archive -All new Azure application offers must also include an [Azure partner customer usage attribution](azure-partner-customer-usage-attribution.md) GUID. - ## Azure regions You can publish your plan to the Azure public region, Azure Government region, or both. Before publishing to [Azure Government](../azure-government/documentation-government-manage-marketplace-partners.md), test and validate your plan in the environment as certain endpoints may differ. To set up and test your plan, request a trial account from [Microsoft Azure Government trial](https://azure.microsoft.com/global-infrastructure/government/request/). diff --git a/articles/marketplace/plan-azure-app-solution-template.md b/articles/marketplace/plan-azure-app-solution-template.md index 3184ce22d15c..752657e9c2b9 100644 --- a/articles/marketplace/plan-azure-app-solution-template.md +++ b/articles/marketplace/plan-azure-app-solution-template.md @@ -7,7 +7,7 @@ ms.reviewer: dannyevers ms.service: marketplace ms.subservice: partnercenter-marketplace-publisher ms.topic: conceptual -ms.date: 11/11/2021 +ms.date: 05/25/2022 --- # Plan a solution template for an Azure application offer @@ -40,8 +40,6 @@ Maximum file sizes supported are: - Up to 1 Gb in total compressed .zip archive size - Up to 1 Gb for any individual uncompressed file within the .zip archive -All new Azure application offers must also include an [Azure partner customer usage attribution](azure-partner-customer-usage-attribution.md) GUID. - ## Azure regions You can publish your plan to the Azure public region, Azure Government region, or both. Before publishing to [Azure Government](../azure-government/documentation-government-manage-marketplace-partners.md), test and validate your plan in the environment as certain endpoints may differ. To set up and test your plan, request a trial account from [Microsoft Azure Government trial](https://azure.microsoft.com/global-infrastructure/government/request/). diff --git a/articles/marketplace/what-is-new.md b/articles/marketplace/what-is-new.md index 203fc209a3f8..81ece0028dfa 100644 --- a/articles/marketplace/what-is-new.md +++ b/articles/marketplace/what-is-new.md @@ -48,7 +48,7 @@ Learn about important updates in the commercial marketplace program of Partner C | Payouts | We updated the payment schedule for [Payout schedules and processes](/partner-center/payout-policy-details). 
| 2022-01-19 | | Analytics | Added questions and answers to the [Commercial marketplace analytics FAQ](./analytics-faq.yml), such as enrolling in the commercial marketplace, where to create a marketplace offer, getting started with programmatic access to commercial marketplace analytics reports, and more. | 2022-01-07 | | Offers | Added a new article, [Troubleshooting Private Plans in the commercial marketplace](azure-private-plan-troubleshooting.md). | 2021-12-13 | -| Offers | We have updated the names of [Dynamics 365](./marketplace-dynamics-365.md#licensing-options) offer types:

    -Dynamics 365 for Customer Engagement & PowerApps is now **Dynamics 365 apps on Dataverse and Power Apps**
    - Dynamics 365 for operations is now **Dynamics 365 Operations Apps**
    - Dynamics 365 business central is now **Dynamics 365 Business Central** | 2021-12-03 | +| Offers | We have updated the names of [Dynamics 365](./marketplace-dynamics-365.md#listing-options) offer types:

    - Dynamics 365 for Customer Engagement & PowerApps is now **Dynamics 365 apps on Dataverse and Power Apps**
    - Dynamics 365 for operations is now **Dynamics 365 Operations Apps**
    - Dynamics 365 business central is now **Dynamics 365 Business Central** | 2021-12-03 | | Policy | We’ve created an [FAQ topic](/legal/marketplace/mpa-faq) to answer publisher questions about the Microsoft Publisher Agreement. | 2021-09-27 | | Policy | We've updated the [Microsoft Publisher Agreement](/legal/marketplace/msft-publisher-agreement). For change history, see [Microsoft Publisher Agreement Version 8.0 – October 2021 Update](/legal/marketplace/mpa-change-history-oct-2021). | 2021-09-14 | | Policy | Updated [certification](/legal/marketplace/certification-policies) policy for September; see [change history](/legal/marketplace/offer-policies-change-history). | 2021-09-10 | diff --git a/articles/mysql/TOC.yml index eb58789bb704..16cd59d63e4d 100644 --- a/articles/mysql/TOC.yml +++ b/articles/mysql/TOC.yml @@ -463,8 +463,8 @@ displayName: recommendation - name: Azure Advisor recommendations href: single-server/concepts-azure-advisor-recommendations.md - - name: Tutorials - items: + - name: Tutorials + items: - name: Application Development items: - name: Design a database diff --git a/articles/mysql/flexible-server/whats-new.md index d9c432d1d620..a7687d875cae 100644 --- a/articles/mysql/flexible-server/whats-new.md +++ b/articles/mysql/flexible-server/whats-new.md @@ -18,6 +18,14 @@ ms.date: 05/24/2022 This article summarizes new releases and features in Azure Database for MySQL - Flexible Server beginning in January 2021. Listings appear in reverse chronological order, with the most recent updates first. +## May 2022 + +- **Announcing Azure Database for MySQL - Flexible Server for business-critical workloads** + Azure Database for MySQL – Flexible Server Business Critical service tier is now generally available. Business Critical service tier is ideal for Tier 1 production workloads that require low latency, high concurrency, fast failover, and high scalability, such as gaming, e-commerce, and Internet-scale applications. To learn more, see [Business Critical service tier](https://techcommunity.microsoft.com/t5/azure-database-for-mysql-blog/announcing-azure-database-for-mysql-flexible-server-for-business/ba-p/3361718). + +- **Announcing the addition of new Burstable compute instances for Azure Database for MySQL - Flexible Server** + We are announcing the addition of new Burstable compute instances to support customers’ auto-scaling compute requirements from 1 vCore up to 20 vCores. Learn more about [Compute Option for Azure Database for MySQL - Flexible Server](https://docs.microsoft.com/azure/mysql/flexible-server/concepts-compute-storage). + ## April 2022 - **Minor version upgrade for Azure Database for MySQL - Flexible server to 8.0.28** diff --git a/articles/mysql/migrate/mysql-on-premises-azure-db/10-post-migration-management.md index 3b58bb4da5b8..71421954fbd8 100644 --- a/articles/mysql/migrate/mysql-on-premises-azure-db/10-post-migration-management.md +++ b/articles/mysql/migrate/mysql-on-premises-azure-db/10-post-migration-management.md @@ -23,7 +23,7 @@ ms.date: 06/21/2021 Once the migration has been successfully completed, the next phase it to manage the new cloud-based data workload resources. Management operations include both control plane and data plane activities. Control plane activities are those related to the Azure resources versus data plane, which is **inside** the Azure resource (in this case MySQL).
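A minimal sketch of that control plane versus data plane split, using placeholder names (`myResourceGroup`, `mydemoserver`, `myadmin`) that are not taken from this guide: the first command below goes through Azure Resource Manager, while the second runs inside the MySQL engine itself.

```azurecli-interactive
# Control plane: change a server setting through Azure Resource Manager (placeholder names)
az mysql server configuration set \
  --resource-group myResourceGroup \
  --server-name mydemoserver \
  --name slow_query_log \
  --value ON

# Data plane: run a statement inside the database engine over the MySQL protocol
mysql --host mydemoserver.mysql.database.azure.com \
      --user myadmin@mydemoserver -p \
      --execute "SHOW DATABASES;"
```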
-Azure Database for MySQL provides for the ability to monitor both of these types of operational activities using Azure-based tools such as [Azure Monitor,](../../../azure-monitor/overview.md) [Log Analytics](../../../azure-monitor/logs/design-logs-deployment.md) and [Microsoft Sentinel](../../../sentinel/overview.md). In addition to the Azure-based tools, security information and event management (SIEM) systems can be configured to consume these logs as well. +Azure Database for MySQL provides for the ability to monitor both of these types of operational activities using Azure-based tools such as [Azure Monitor,](../../../azure-monitor/overview.md) [Log Analytics](../../../azure-monitor/logs/log-analytics-overview.md) and [Microsoft Sentinel](../../../sentinel/overview.md). In addition to the Azure-based tools, security information and event management (SIEM) systems can be configured to consume these logs as well. Whichever tool is used to monitor the new cloud-based workloads, alerts need to be created to warn Azure and database administrators of any suspicious activity. If a particular alert event has a well-defined remediation path, alerts can fire automated [Azure run books](../../../automation/learn/powershell-runbook-managed-identity.md) to address the event. @@ -97,7 +97,7 @@ The [Planned maintenance notification](../../concepts-monitoring.md#planned-main ## WWI scenario -WWI decided to utilize the Azure Activity logs and enable MySQL logging to flow to a [Log Analytics workspace.](../../../azure-monitor/logs/design-logs-deployment.md) This workspace is configured to be a part of [Microsoft Sentinel](../../../sentinel/index.yml) such that any [Threat Analytics](../../concepts-security.md#threat-protection) events would be surfaced, and incidents created. +WWI decided to utilize the Azure Activity logs and enable MySQL logging to flow to a [Log Analytics workspace.](../../../azure-monitor/logs/workspace-design.md) This workspace is configured to be a part of [Microsoft Sentinel](../../../sentinel/index.yml) such that any [Threat Analytics](../../concepts-security.md#threat-protection) events would be surfaced, and incidents created. The MySQL DBAs installed the Azure Database for [MySQL Azure PowerShell cmdlets](../../quickstart-create-mysql-server-database-using-azure-powershell.md) to make managing the MySQL Server automated versus having to log to the Azure portal each time. diff --git a/articles/mysql/migrate/mysql-on-premises-azure-db/13-security.md b/articles/mysql/migrate/mysql-on-premises-azure-db/13-security.md index 4372deb941c2..4bc64d8c88c0 100644 --- a/articles/mysql/migrate/mysql-on-premises-azure-db/13-security.md +++ b/articles/mysql/migrate/mysql-on-premises-azure-db/13-security.md @@ -38,7 +38,7 @@ if a user or application credentials are compromised, logs are not likely to ref ## Audit logging -MySQL has a robust built-in audit log feature. By default, this [audit log feature is disabled](../../concepts-audit-logs.md) in Azure Database for MySQL. Server level logging can be enabled by changing the `audit\_log\_enabled` server parameter. Once enabled, logs can be accessed through [Azure Monitor](../../../azure-monitor/overview.md) and [Log Analytics](../../../azure-monitor/logs/design-logs-deployment.md) by turning on [diagnostic logging.](../../howto-configure-audit-logs-portal.md#set-up-diagnostic-logs) +MySQL has a robust built-in audit log feature. By default, this [audit log feature is disabled](../../concepts-audit-logs.md) in Azure Database for MySQL. 
Server level logging can be enabled by changing the `audit\_log\_enabled` server parameter. Once enabled, logs can be accessed through [Azure Monitor](../../../azure-monitor/overview.md) and [Log Analytics](../../../azure-monitor/logs/log-analytics-workspace-overview.md) by turning on [diagnostic logging.](../../howto-configure-audit-logs-portal.md#set-up-diagnostic-logs) To query for user connection-related events, run the following KQL query: diff --git a/articles/mysql/single-server/concepts-aks.md b/articles/mysql/single-server/concepts-aks.md index e5f37cce94ba..e2eddf7370ca 100644 --- a/articles/mysql/single-server/concepts-aks.md +++ b/articles/mysql/single-server/concepts-aks.md @@ -59,4 +59,4 @@ az network nic list --resource-group nodeResourceGroup -o table ## Next steps -Create an AKS cluster [using the Azure CLI](/azure/aks/learn/quick-kubernetes-deploy-cli), [using Azure PowerShell](/azure/aks/learn/quick-kubernetes-deploy-powershell), or [using the Azure portal](/azure/aks/learn/quick-kubernetes-deploy-portal). +Create an AKS cluster [using the Azure CLI](../../aks/learn/quick-kubernetes-deploy-cli.md), [using Azure PowerShell](../../aks/learn/quick-kubernetes-deploy-powershell.md), or [using the Azure portal](../../aks/learn/quick-kubernetes-deploy-portal.md). \ No newline at end of file diff --git a/articles/mysql/single-server/single-server-overview.md b/articles/mysql/single-server/single-server-overview.md index 59191d31ba92..d0aa5d0fc010 100644 --- a/articles/mysql/single-server/single-server-overview.md +++ b/articles/mysql/single-server/single-server-overview.md @@ -58,7 +58,7 @@ Single Server is available in three SKU tiers: Basic, General Purpose, and Memor Single Server uses the FIPS 140-2 validated cryptographic module for storage encryption of data at-rest. Data, including backups, and temporary files created while running queries are encrypted. The service uses the AES 256-bit cipher included in Azure storage encryption, and the keys can be system managed (default) or [customer managed](concepts-data-encryption-mysql.md). The service encrypts data in-motion with transport layer security (SSL/TLS) enforced by default. The service supports TLS versions 1.2, 1.1 and 1.0 with an ability to enforce [minimum TLS version](concepts-ssl-connection-security.md). -The service allows private access to the servers using [private link](concepts-data-access-security-private-link.md) and offers threat protection through the optional [Microsoft Defender for open-source relational databases](/azure/defender-for-cloud/defender-for-databases-introduction) plan. Microsoft Defender for open-source relational databases detects anomalous activities indicating unusual and potentially harmful attempts to access or exploit databases. +The service allows private access to the servers using [private link](concepts-data-access-security-private-link.md) and offers threat protection through the optional [Microsoft Defender for open-source relational databases](../../defender-for-cloud/defender-for-databases-introduction.md) plan. Microsoft Defender for open-source relational databases detects anomalous activities indicating unusual and potentially harmful attempts to access or exploit databases. In addition to native authentication, Single Server supports [Azure Active Directory](../../active-directory/fundamentals/active-directory-whatis.md) authentication. Azure AD authentication is a mechanism of connecting to the MySQL servers using identities defined and managed in Azure AD. 
With Azure AD authentication, you can manage database user identities and other Azure services in a central location, which simplifies and centralizes access control. @@ -106,4 +106,4 @@ Now that you've read an introduction to Azure Database for MySQL - Single Server - [Ruby](./connect-ruby.md) - [PHP](./connect-php.md) - [.NET (C#)](./connect-csharp.md) - - [Go](./connect-go.md) + - [Go](./connect-go.md) \ No newline at end of file diff --git a/articles/notification-hubs/notification-hubs-android-push-notification-google-fcm-get-started.md b/articles/notification-hubs/notification-hubs-android-push-notification-google-fcm-get-started.md index 9a8759b3ae2f..75fbb0c5c6bf 100644 --- a/articles/notification-hubs/notification-hubs-android-push-notification-google-fcm-get-started.md +++ b/articles/notification-hubs/notification-hubs-android-push-notification-google-fcm-get-started.md @@ -169,7 +169,7 @@ Your hub is now configured to work with Firebase Cloud Messaging. You also have 1. In the Project View, expand **app** > **src** > **main** > **java**. Right-click your package folder under **java**, select **New**, and then select **Java Class**. Enter **NotificationSettings** for the name, and then select **OK**. - Make sure to update these three placeholders in the following code for the `NotificationSettings` class: + Make sure to update these two placeholders in the following code for the `NotificationSettings` class: * **HubListenConnectionString**: The **DefaultListenAccessSignature** connection string for your hub. You can copy that connection string by clicking **Access Policies** in your hub in the [Azure portal]. * **HubName**: Use the name of your hub that appears in the hub page in the [Azure portal]. diff --git a/articles/openshift/howto-create-service-principal.md b/articles/openshift/howto-create-service-principal.md index c05df94fd661..469e996e58f3 100644 --- a/articles/openshift/howto-create-service-principal.md +++ b/articles/openshift/howto-create-service-principal.md @@ -1,6 +1,6 @@ --- title: Creating and using a service principal with an Azure Red Hat OpenShift cluster -description: In this how-to article, learn how to create a service principal with an Azure Red Hat OpenShift cluster using Azure CLI or the Azure portal. +description: In this how-to article, learn how to create and use a service principal with an Azure Red Hat OpenShift cluster using Azure CLI or the Azure portal. author: rahulm23 ms.service: azure-redhat-openshift ms.topic: how-to @@ -12,41 +12,41 @@ keywords: azure, openshift, aro, red hat, azure CLI, azure portal zone_pivot_groups: azure-red-hat-openshift-service-principal --- -# Create and use a service principal with an Azure Red Hat OpenShift cluster +# Create and use a service principal to deploy an Azure Red Hat OpenShift cluster To interact with Azure APIs, an Azure Red Hat OpenShift cluster requires an Azure Active Directory (AD) service principal. This service principal is used to dynamically create, manage, or access other Azure resources, such as an Azure load balancer or an Azure Container Registry (ACR). For more information, see [Application and service principal objects in Azure Active Directory](../active-directory/develop/app-objects-and-service-principals.md). -This article explains how to create and use a service principal for your Azure Red Hat OpenShift clusters using the Azure command-line interface (Azure CLI) or the Azure portal. 
+This article explains how to create and use a service principal to deploy your Azure Red Hat OpenShift clusters using the Azure command-line interface (Azure CLI) or the Azure portal. > [!NOTE] > Service principals expire in one year unless configured for longer periods. For information on extending your service principal expiration period, see [Rotate service principal credentials for your Azure Red Hat OpenShift (ARO) Cluster](howto-service-principal-credential-rotation.md). ::: zone pivot="aro-azurecli" -## Create a service principal with Azure CLI +## Create and use a service principal -The following sections explain how to use the Azure CLI to create a service principal for your Azure Red Hat OpenShift cluster +The following sections explain how to create and use a service principal to deploy an Azure Red Hat OpenShift cluster. ## Prerequisites - Azure CLI If you’re using the Azure CLI, you’ll need Azure CLI version 2.0.59 or later installed and configured. Run `az --version` to find the version. If you need to install or upgrade, see [Install Azure CLI](/cli/azure/install-azure-cli). -On [Use the portal to create an Azure AD application and service principal that can access resources](../active-directory/develop/howto-create-service-principal-portal.md) create a service principal. Be sure to save the client ID and the appID. +## Create a resource group - Azure CLI -## Create a resource group +Run the following Azure CLI command to create a resource group. ```azurecli-interactive AZ_RG=$(az group create -n test-aro-rg -l eastus2 --query name -o tsv) ``` -## Create a service principal - Azure CLI +## Create a service principal and assign role-based access control (RBAC) - Azure CLI - To create a service principal with the Azure CLI, run the following command. + To assign the contributor role and scope the service principal to the Azure Red Hat OpenShift resource group, run the following command. ```azurecli-interactive # Get Azure subscription ID AZ_SUB_ID=$(az account show --query id -o tsv) -# Create a service principal with contributor role and scoped to the ARO resource group +# Create a service principal with contributor role and scoped to the Azure Red Hat OpenShift resource group az ad sp create-for-rbac -n "test-aro-SP" --role contributor --scopes "/subscriptions/${AZ_SUB_ID}/resourceGroups/${AZ_RG}" ``` @@ -63,23 +63,19 @@ The output is similar to the following example. "password": "yourpassword", - "tenant": "yourtenantname" t + "tenant": "yourtenantname" } ``` - -Retain your `appId` and `password`. These values are used when you create an Azure Red Hat OpenShift cluster below. > [!NOTE] -> This service principal only allows a contributor over the resource group the ARO cluster is located in. If your VNet is in another resource group, you need to assign the service principal contributor role to that resource group as well. - -For more information, see [Manage service principal roles](/cli/azure/create-an-azure-service-principal-azure-cli#3-manage-service-principal-roles). +> This service principal only allows a contributor over the resource group the Azure Red Hat OpenShift cluster is located in. If your VNet is in another resource group, you need to assign the service principal contributor role to that resource group as well. To grant permissions to an existing service principal with the Azure portal, see [Create an Azure AD app and service principal in the portal](../active-directory/develop/howto-create-service-principal-portal.md#configure-access-policies-on-resources). 
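From the Azure CLI, a minimal sketch of that extra role assignment, assuming a hypothetical VNet resource group named `test-aro-vnet-rg` and an `AZ_APP_ID` variable that holds the `appId` returned by `az ad sp create-for-rbac` (neither value comes from this article):

```azurecli-interactive
# Placeholder values; substitute your own appId and VNet resource group name
AZ_APP_ID=<appId-from-az-ad-sp-create-for-rbac>
AZ_SUB_ID=$(az account show --query id -o tsv)

# Grant the service principal Contributor on the resource group that contains the VNet
az role assignment create \
  --assignee $AZ_APP_ID \
  --role Contributor \
  --scope "/subscriptions/${AZ_SUB_ID}/resourceGroups/test-aro-vnet-rg"
```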
-## Use the service principal to create a cluster - Azure CLI +## Use the service principal to deploy an Azure Red Hat OpenShift cluster - Azure CLI -To use an existing service principal when you create an Azure Red Hat OpenShift cluster using the `az aro create` command, use the `--client-id` and `--client-secret` parameters to specify the appId and password from the output of the `az ad sp create-for-rbac` command: +Use the service principal that you created earlier to deploy the Azure Red Hat OpenShift cluster with the `az aro create` command. Use the `--client-id` and `--client-secret` parameters to specify the appId and password from the output of the `az ad sp create-for-rbac` command, as shown in the following command. ```azure-cli az aro create \ @@ -104,19 +100,19 @@ az aro create \ The following sections explain how to use the Azure portal to create a service principal for your Azure Red Hat OpenShift cluster. -## Prerequiste - Azure portal +## Prerequisite - Azure portal -On [Use the portal to create an Azure AD application and service principal that can access resources](../active-directory/develop/howto-create-service-principal-portal.md) create a service principal. Be sure to save the client ID and the appID. +Create a service principal, as explained in [Use the portal to create an Azure AD application and service principal that can access resources](../active-directory/develop/howto-create-service-principal-portal.md). **Be sure to save the client ID and the appID.** -## Create a service principal - Azure portal +## Use the service principal to deploy an Azure Red Hat OpenShift cluster - Azure portal -To create a service principal using the Azure portal, complete the following steps. +To use the service principal you created to deploy a cluster, complete the following steps. 1. On the Create Azure Red Hat OpenShift **Basics** tab, create a resource group for your subscription, as shown in the following example. :::image type="content" source="./media/basics-openshift-sp.png" alt-text="Screenshot that shows how to use the Azure Red Hat service principal with Azure portal to create a cluster." lightbox="./media/basics-openshift-sp.png"::: -2. Click **Next: Authentication** to configure and deploy the service principal on the **Authentication** page of the **Azure Red Hat OpenShift** dialog. +2. Select **Next: Authentication** to configure the service principal on the **Authentication** page of the **Azure Red Hat OpenShift** dialog. :::image type="content" source="./media/openshift-service-principal-portal.png" alt-text="Screenshot that shows how to use the Authentication tab with Azure portal to create a service principal." lightbox="./media/openshift-service-principal-portal.png"::: @@ -129,7 +125,7 @@ In the **Cluster pull secret** section: - **Pull secret** is your cluster's pull secret's decrypted value. If you don't have a pull secret, leave this field blank. -After completing this tab, select **Next: Networking** to continue creating your cluster. Select **Review + Create** when you complete the remaining tabs. +After completing this tab, select **Next: Networking** to continue deploying your cluster. Select **Review + Create** when you complete the remaining tabs. > [!NOTE] > This service principal only allows a contributor over the resource group the Azure Red Hat OpenShift cluster is located in.
If your VNet is in another resource group, you need to assign the service principal contributor role to that resource group as well. diff --git a/articles/openshift/index.yml index 5ac66998a889..62748b96ddc4 100644 --- a/articles/openshift/index.yml +++ b/articles/openshift/index.yml @@ -59,7 +59,7 @@ landingContent: - linkListType: reference links: - text: Azure CLI OpenShift 3 - url: /cli/azure/openshift + url: /azure/openshift/ - text: Azure CLI OpenShift 4 url: /cli/azure/aro - text: REST API diff --git a/articles/openshift/quickstart-portal.md index f4d1e6043fe3..ead29f1c35ed 100644 --- a/articles/openshift/quickstart-portal.md +++ b/articles/openshift/quickstart-portal.md @@ -16,7 +16,7 @@ Azure Red Hat OpenShift is a managed OpenShift service that lets you quickly dep ## Prerequisites Sign in to the [Azure portal](https://portal.azure.com). -On [Use the portal to create an Azure AD application and service principal that can access resources](/active-directory/develop/howto-create-service-principal-portal) create a service principal. Be sure to save the client ID and the appID. +Create a service principal, as explained in [Use the portal to create an Azure AD application and service principal that can access resources](/azure/active-directory/develop/howto-create-service-principal-portal). **Be sure to save the client ID and the appID.** ## Create an Azure Red Hat OpenShift cluster 1. On the Azure portal menu or from the **Home** page, select **All Services** under three horizontal bars on the top left hand page. @@ -56,7 +56,7 @@ On [Use the portal to create an Azure AD application and service principal that ![**Tags** tab on Azure portal](./media/Tags.png) -7. Click **Review + create** and then **Create** when validation completes. +7. Select **Review + create** and then **Create** when validation completes.
![**Review + create** tab on Azure portal](./media/Review+Create.png) diff --git a/articles/openshift/support-policies-v4.md b/articles/openshift/support-policies-v4.md index 25bb879419ff..13f03d08a662 100644 --- a/articles/openshift/support-policies-v4.md +++ b/articles/openshift/support-policies-v4.md @@ -47,6 +47,18 @@ Azure Red Hat OpenShift 4 supports node instances on the following virtual machi |Dsv3|Standard_D8s_v3|8|32| |Dsv3|Standard_D16s_v3|16|64| |Dsv3|Standard_D32s_v3|32|128| +|Eiv3|Standard_E64i_v3|64|432| +|Eisv3|Standard_E64is_v3|64|432| +|Eis4|Standard_E80is_v4|80|504| +|Eids4|Standard_E80ids_v4|80|504| +|Eiv5|Standard_E104i_v5|104|672| +|Eisv5|Standard_E104is_v5|104|672| +|Eidv5|Standard_E104id_v5|104|672| +|Eidsv5|Standard_E104ids_v5|104|672| +|Fsv2|Standard_F72s_v2|72|144| +|G|Standard_G5|32|448| +|G|Standard_GS5|32|448| +|Mms|Standard_M128ms|128|3892| ### General purpose @@ -69,6 +81,14 @@ Azure Red Hat OpenShift 4 supports node instances on the following virtual machi |Esv3|Standard_E8s_v3|8|64| |Esv3|Standard_E16s_v3|16|128| |Esv3|Standard_E32s_v3|32|256| +|Eiv3|Standard_E64i_v3|64|432| +|Eisv3|Standard_E64is_v3|64|432| +|Eis4|Standard_E80is_v4|80|504| +|Eids4|Standard_E80ids_v4|80|504| +|Eiv5|Standard_E104i_v5|104|672| +|Eisv5|Standard_E104is_v5|104|672| +|Eidv5|Standard_E104id_v5|104|672| +|Eidsv5|Standard_E104ids_v5|104|672| ### Compute optimized @@ -78,6 +98,13 @@ Azure Red Hat OpenShift 4 supports node instances on the following virtual machi |Fsv2|Standard_F8s_v2|8|16| |Fsv2|Standard_F16s_v2|16|32| |Fsv2|Standard_F32s_v2|32|64| +|Fsv2|Standard_F72s_v2|72|144| + +### Memory and compute optimized + +|Series|Size|vCPU|Memory: GiB| +|-|-|-|-| +|Mms|Standard_M128ms|128|3892| ### Storage optimized @@ -92,3 +119,10 @@ Azure Red Hat OpenShift 4 supports node instances on the following virtual machi |L32s_v2|Standard_L32s_v2|32|256| |L48s_v2|Standard_L48s_v2|32|384| |L64s_v2|Standard_L48s_v2|64|512| + +### Memory and storage optimized + +|Series|Size|vCPU|Memory: GiB| +|-|-|-|-| +|G|Standard_G5|32|448| +|G|Standard_GS5|32|448| \ No newline at end of file diff --git a/articles/partner-solutions/media/overview/confluent-cloud.png b/articles/partner-solutions/media/overview/confluent-cloud.png deleted file mode 100644 index 1b6bfd4a7146..000000000000 Binary files a/articles/partner-solutions/media/overview/confluent-cloud.png and /dev/null differ diff --git a/articles/postgresql/TOC.yml b/articles/postgresql/TOC.yml index 09a4494e8062..7cf290eea702 100644 --- a/articles/postgresql/TOC.yml +++ b/articles/postgresql/TOC.yml @@ -14,16 +14,6 @@ items: - name: Migration items: - - name: Single Server to Flexible Server migration (preview) - items: - - name: Single to Flexible - Concepts - href: single-server/concepts-single-to-flexible.md - - name: Single to Flexible - Migrate using portal - href: single-server/how-to-migrate-single-to-flexible-portal.md - - name: Single to Flexible - Migrate using CLI - href: single-server/how-to-migrate-single-to-flexible-cli.md - - name: Single to Flexible - Set up Azure AD app using portal - href: single-server/how-to-setup-azure-ad-app-portal.md - name: Migrate data with pg_dump and pg_restore href: single-server/how-to-migrate-using-dump-and-restore.md displayName: pg_dump, pg_restore diff --git a/articles/postgresql/flexible-server/overview.md b/articles/postgresql/flexible-server/overview.md index 2369888dcc18..7bc12235bb6d 100644 --- a/articles/postgresql/flexible-server/overview.md +++ b/articles/postgresql/flexible-server/overview.md 
@@ -37,7 +37,7 @@ Flexible servers are best suited for ## High availability -The flexible server deployment model is designed to support high availability within single availability zone and across multiple availability zones. The architecture separates compute and storage. The database engine runs on a container inside a Linux virtual machine, while data files reside on Azure storage. The storage maintains three locally redundant synchronous copies of the database files ensuring data durability. +The flexible server deployment model is designed to support high availability within a single availability zone and across multiple availability zones. The architecture separates compute and storage. The database engine runs on a container inside a Linux virtual machine, while data files reside on Azure storage. The storage maintains three locally redundant synchronous copies of the database files ensuring data durability. During planned or unplanned failover events, if the server goes down, the service maintains high availability of the servers using following automated procedure: diff --git a/articles/postgresql/single-server/concepts-aks.md b/articles/postgresql/single-server/concepts-aks.md index 19a331a0d28f..12485c6c2a40 100644 --- a/articles/postgresql/single-server/concepts-aks.md +++ b/articles/postgresql/single-server/concepts-aks.md @@ -44,4 +44,4 @@ There are multiple connection poolers you can use with PostgreSQL. One of these ## Next steps -Create an AKS cluster [using the Azure CLI](/azure/aks/learn/quick-kubernetes-deploy-cli), [using Azure PowerShell](/azure/aks/learn/quick-kubernetes-deploy-powershell), or [using the Azure portal](/azure/aks/learn/quick-kubernetes-deploy-portal). +Create an AKS cluster [using the Azure CLI](../../aks/learn/quick-kubernetes-deploy-cli.md), [using Azure PowerShell](../../aks/learn/quick-kubernetes-deploy-powershell.md), or [using the Azure portal](../../aks/learn/quick-kubernetes-deploy-portal.md). \ No newline at end of file diff --git a/articles/postgresql/single-server/concepts-security.md b/articles/postgresql/single-server/concepts-security.md index dcdde08d7415..082c93e05eca 100644 --- a/articles/postgresql/single-server/concepts-security.md +++ b/articles/postgresql/single-server/concepts-security.md @@ -46,7 +46,7 @@ You can also connect to the server using [Azure Active Directory authentication] ## Threat protection -You can opt in to [Advanced Threat Protection](/azure/defender-for-cloud/defender-for-databases-introduction) which detects anomalous activities indicating unusual and potentially harmful attempts to access or exploit servers. +You can opt in to [Advanced Threat Protection](../../defender-for-cloud/defender-for-databases-introduction.md) which detects anomalous activities indicating unusual and potentially harmful attempts to access or exploit servers. [Audit logging](concepts-audit.md) is available to track activity in your databases. 
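A minimal sketch of turning on pgAudit-based audit logging from the Azure CLI, assuming placeholder resource group and server names (`myResourceGroup`, `mydemoserver`); exact parameter values can vary by tier and PostgreSQL version, and `shared_preload_libraries` changes only take effect after a restart:

```azurecli-interactive
# Load the pgAudit library (placeholder names; a restart is required before it takes effect)
az postgres server configuration set \
  --resource-group myResourceGroup \
  --server-name mydemoserver \
  --name shared_preload_libraries \
  --value pgaudit

az postgres server restart --resource-group myResourceGroup --name mydemoserver

# Audit data-modifying statements; the pgaudit extension may also need to be created in the database
az postgres server configuration set \
  --resource-group myResourceGroup \
  --server-name mydemoserver \
  --name pgaudit.log \
  --value "WRITE"
```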
@@ -56,4 +56,4 @@ Oracle supports Transparent Data Encryption (TDE) to encrypt table and tablespac ## Next steps - Enable firewall rules for [IPs](concepts-firewall-rules.md) or [virtual networks](concepts-data-access-and-security-vnet.md) -- Learn about [Azure Active Directory authentication](concepts-azure-ad-authentication.md) in Azure Database for PostgreSQL +- Learn about [Azure Active Directory authentication](concepts-azure-ad-authentication.md) in Azure Database for PostgreSQL \ No newline at end of file diff --git a/articles/private-link/create-private-endpoint-cli.md b/articles/private-link/create-private-endpoint-cli.md index ddb6dd46592e..b3ad836e87fd 100644 --- a/articles/private-link/create-private-endpoint-cli.md +++ b/articles/private-link/create-private-endpoint-cli.md @@ -5,7 +5,7 @@ services: private-link author: asudbring ms.service: private-link ms.topic: quickstart -ms.date: 11/07/2020 +ms.date: 05/24/2022 ms.author: allensu ms.custom: mode-api, devx-track-azurecli #Customer intent: As someone who has a basic network background but is new to Azure, I want to create a private endpoint by using the Azure CLI. @@ -16,7 +16,7 @@ Get started with Azure Private Link by using a private endpoint to connect secur In this quickstart, you'll create a private endpoint for an Azure web app and then create and deploy a virtual machine (VM) to test the private connection. -You can create private endpoints for a variety of Azure services, such as Azure SQL and Azure Storage. +You can create private endpoints for various Azure services, such as Azure SQL and Azure Storage. ## Prerequisites @@ -26,9 +26,9 @@ You can create private endpoints for a variety of Azure services, such as Azure * An Azure web app with a *PremiumV2-tier* or higher app service plan, deployed in your Azure subscription. - For more information and an example, see [Quickstart: Create an ASP.NET Core web app in Azure](../app-service/quickstart-dotnetcore.md). + - For more information and an example, see [Quickstart: Create an ASP.NET Core web app in Azure](../app-service/quickstart-dotnetcore.md). - For a detailed tutorial on creating a web app and an endpoint, see [Tutorial: Connect to a web app by using a private endpoint](tutorial-private-endpoint-webapp-portal.md). + - The example webapp in this article is named **myWebApp1979**. Replace the example with your webapp name. * The latest version of the Azure CLI, installed. @@ -40,7 +40,7 @@ You can create private endpoints for a variety of Azure services, such as Azure An Azure resource group is a logical container where Azure resources are deployed and managed. -First, create a resource group by using [az group create](/cli/azure/group#az-group-create): +First, create a resource group by using **[az group create](/cli/azure/group#az-group-create)**: ```azurecli-interactive az group create \ @@ -50,237 +50,192 @@ az group create \ ## Create a virtual network and bastion host -Next, create a virtual network, subnet, and bastion host. You'll use the bastion host to connect securely to the VM for testing the private endpoint. - -1. 
Create a virtual network by using [az network vnet create](/cli/azure/network/vnet#az-network-vnet-create): - - * Name: **myVNet** - * Address prefix: **10.0.0.0/16** - * Subnet name: **myBackendSubnet** - * Subnet prefix: **10.0.0.0/24** - * Resource group: **CreatePrivateEndpointQS-rg** - * Location: **eastus** - - ```azurecli-interactive - az network vnet create \ - --resource-group CreatePrivateEndpointQS-rg\ - --location eastus \ - --name myVNet \ - --address-prefixes 10.0.0.0/16 \ - --subnet-name myBackendSubnet \ - --subnet-prefixes 10.0.0.0/24 - ``` - -1. Update the subnet to disable private-endpoint network policies for the private endpoint by using [az network vnet subnet update](/cli/azure/network/vnet/subnet#az-network-vnet-subnet-update): +A virtual network and subnet is required for to host the private IP address for the private endpoint. You'll create a bastion host to connect securely to the virtual machine to test the private endpoint. You'll create the virtual machine in a later section. - ```azurecli-interactive - az network vnet subnet update \ - --name myBackendSubnet \ - --resource-group CreatePrivateEndpointQS-rg \ - --vnet-name myVNet \ - --disable-private-endpoint-network-policies true - ``` +Create a virtual network with **[az network vnet create](/cli/azure/network/vnet#az-network-vnet-create)**. -1. Create a public IP address for the bastion host by using [az network public-ip create](/cli/azure/network/public-ip#az-network-public-ip-create): +```azurecli-interactive +az network vnet create \ + --resource-group CreatePrivateEndpointQS-rg \ + --location eastus \ + --name myVNet \ + --address-prefixes 10.0.0.0/16 \ + --subnet-name myBackendSubnet \ + --subnet-prefixes 10.0.0.0/24 +``` - * Standard zone-redundant public IP address name: **myBastionIP** - * Resource group: **CreatePrivateEndpointQS-rg** +Create a bastion subnet with **[az network vnet subnet create](/cli/azure/network/vnet/subnet#az-network-vnet-subnet-create)**. - ```azurecli-interactive - az network public-ip create \ - --resource-group CreatePrivateEndpointQS-rg \ - --name myBastionIP \ - --sku Standard - ``` +```azurecli-interactive +az network vnet subnet create \ + --resource-group CreatePrivateEndpointQS-rg \ + --name AzureBastionSubnet \ + --vnet-name myVNet \ + --address-prefixes 10.0.1.0/27 +``` -1. Create a bastion subnet by using [az network vnet subnet create](/cli/azure/network/vnet/subnet#az-network-vnet-subnet-create): +Create a public IP address for the bastion host with **[az network public-ip create](/cli/azure/network/public-ip#az-network-public-ip-create)**. - * Name: **AzureBastionSubnet** - * Address prefix: **10.0.1.0/24** - * Virtual network: **myVNet** - * Resource group: **CreatePrivateEndpointQS-rg** +```azurecli-interactive +az network public-ip create \ + --resource-group CreatePrivateEndpointQS-rg \ + --name myBastionIP \ + --sku Standard \ + --zone 1 2 3 +``` - ```azurecli-interactive - az network vnet subnet create \ - --resource-group CreatePrivateEndpointQS-rg \ - --name AzureBastionSubnet \ - --vnet-name myVNet \ - --address-prefixes 10.0.1.0/24 - ``` +Create the bastion host with **[az network bastion create](/cli/azure/network/bastion#az-network-bastion-create)**. -1. 
Create a bastion host by using [az network bastion create](/cli/azure/network/bastion#az-network-bastion-create): - - * Name: **myBastionHost** - * Resource group: **CreatePrivateEndpointQS-rg** - * Public IP address: **myBastionIP** - * Virtual network: **myVNet** - * Location: **eastus** - - ```azurecli-interactive - az network bastion create \ - --resource-group CreatePrivateEndpointQS-rg \ - --name myBastionHost \ - --public-ip-address myBastionIP \ - --vnet-name myVNet \ - --location eastus - ``` +```azurecli-interactive +az network bastion create \ + --resource-group CreatePrivateEndpointQS-rg \ + --name myBastionHost \ + --public-ip-address myBastionIP \ + --vnet-name myVNet \ + --location eastus +``` It can take a few minutes for the Azure Bastion host to deploy. -## Create a test virtual machine - -Next, create a VM that you can use to test the private endpoint. - -1. Create the VM by using [az vm create](/cli/azure/vm#az-vm-create). -1. At the prompt, provide a password to be used as the credentials for the VM: ## Create a private endpoint - * Name: **myVM** - * Resource group: **CreatePrivateEndpointQS-rg** - * Virtual network: **myVNet** - * Subnet: **myBackendSubnet** - * Server image: **Win2019Datacenter** +An Azure service that supports private endpoints is required to set up the private endpoint and connection to the virtual network. For the examples in this article, you'll use the Azure WebApp from the prerequisites. For more information on the Azure services that support a private endpoint, see [Azure Private Link availability](availability.md). +A private endpoint can have a static or dynamically assigned IP address. +> [!IMPORTANT] > You must have a previously deployed Azure WebApp to proceed with the steps in this article. For more information, see [Prerequisites](#prerequisites). - ```azurecli-interactive - az vm create \ - --resource-group CreatePrivateEndpointQS-rg \ - --name myVM \ - --image Win2019Datacenter \ - --public-ip-address "" \ - --vnet-name myVNet \ - --subnet myBackendSubnet \ - --admin-username azureuser - ``` +Place the resource ID of the web app that you created earlier into a shell variable with **[az webapp list](/cli/azure/webapp#az-webapp-list)**. Create the private endpoint with **[az network private-endpoint create](/cli/azure/network/private-endpoint#az-network-private-endpoint-create)**. -[!INCLUDE [ephemeral-ip-note.md](../../includes/ephemeral-ip-note.md)] +# [**Dynamic IP**](#tab/dynamic-ip) -## Create a private endpoint - -Next, create the private endpoint. +```azurecli-interactive +id=$(az webapp list \ + --resource-group CreatePrivateEndpointQS-rg \ + --query '[].[id]' \ + --output tsv) -1. Place the resource ID of the web app that you created earlier into a shell variable by using [az webapp list](/cli/azure/webapp#az-webapp-list). +az network private-endpoint create \ + --connection-name myConnection \ + --name myPrivateEndpoint \ + --private-connection-resource-id $id \ + --resource-group CreatePrivateEndpointQS-rg \ + --subnet myBackendSubnet \ + --group-id sites \ + --vnet-name myVNet +``` -1. 
Create the endpoint and connection by using [az network private-endpoint create](/cli/azure/network/private-endpoint#az-network-private-endpoint-create): +# [**Static IP**](#tab/static-ip) - * Name: **myPrivateEndpoint** - * Resource group: **CreatePrivateEndpointQS-rg** - * Virtual network: **myVNet** - * Subnet: **myBackendSubnet** - * Connection name: **myConnection** - * Web app: **\** + ```azurecli-interactive +id=$(az webapp list \ + --resource-group CreatePrivateEndpointQS-rg \ + --query '[].[id]' \ + --output tsv) - ```azurecli-interactive - id=$(az webapp list \ - --resource-group \ - --query '[].[id]' \ - --output tsv) +az network private-endpoint create \ + --connection-name myConnection \ + --name myPrivateEndpoint \ + --private-connection-resource-id $id \ + --resource-group CreatePrivateEndpointQS-rg \ + --subnet myBackendSubnet \ + --group-id sites \ + --ip-config name=myIPconfig group-id=sites member-name=sites private-ip-address=10.0.0.10 \ + --vnet-name myVNet +``` - az network private-endpoint create \ - --name myPrivateEndpoint \ - --resource-group CreatePrivateEndpointQS-rg \ - --vnet-name myVNet --subnet myBackendSubnet \ - --private-connection-resource-id $id \ - --group-id sites \ - --connection-name myConnection - ``` +--- ## Configure the private DNS zone -Next, create and configure the private DNS zone by using [az network private-dns zone create](/cli/azure/network/private-dns/zone#az-network-private-dns-zone-create). +A private DNS zone is used to resolve the DNS name of the private endpoint in the virtual network. For this example, we're using the DNS information for an Azure WebApp, for more information on the DNS configuration of private endpoints, see [Azure Private Endpoint DNS configuration](private-endpoint-dns.md)]. -1. Create the virtual network link to the DNS zone by using [az network private-dns link vnet create](/cli/azure/network/private-dns/link/vnet#az-network-private-dns-link-vnet-create). +Create a new private Azure DNS zone with **[az network private-dns zone create](/cli/azure/network/private-dns/zone#az-network-private-dns-zone-create)**. -1. Create a DNS zone group by using [az network private-endpoint dns-zone-group create](/cli/azure/network/private-endpoint/dns-zone-group#az-network-private-endpoint-dns-zone-group-create). +```azurecli-interactive +az network private-dns zone create \ + --resource-group CreatePrivateEndpointQS-rg \ + --name "privatelink.azurewebsites.net" +``` - * Zone name: **privatelink.azurewebsites.net** - * Virtual network: **myVNet** - * Resource group: **CreatePrivateEndpointQS-rg** - * DNS link name: **myDNSLink** - * Endpoint name: **myPrivateEndpoint** - * Zone group name: **MyZoneGroup** +Link the DNS zone to the virtual network you created previously with **[az network private-dns link vnet create](/cli/azure/network/private-dns/link/vnet#az-network-private-dns-link-vnet-create)**. 
- ```azurecli-interactive - az network private-dns zone create \ - --resource-group CreatePrivateEndpointQS-rg \ - --name "privatelink.azurewebsites.net" +```azurecli-interactive +az network private-dns link vnet create \ + --resource-group CreatePrivateEndpointQS-rg \ + --zone-name "privatelink.azurewebsites.net" \ + --name MyDNSLink \ + --virtual-network myVNet \ + --registration-enabled false +``` - az network private-dns link vnet create \ - --resource-group CreatePrivateEndpointQS-rg \ - --zone-name "privatelink.azurewebsites.net" \ - --name MyDNSLink \ - --virtual-network myVNet \ - --registration-enabled false +Create a DNS zone group with **[az network private-endpoint dns-zone-group create](/cli/azure/network/private-endpoint/dns-zone-group#az-network-private-endpoint-dns-zone-group-create)**. - az network private-endpoint dns-zone-group create \ +```azurecli-interactive +az network private-endpoint dns-zone-group create \ --resource-group CreatePrivateEndpointQS-rg \ --endpoint-name myPrivateEndpoint \ --name MyZoneGroup \ --private-dns-zone "privatelink.azurewebsites.net" \ --zone-name webapp - ``` +``` -## Test connectivity to the private endpoint +## Create a test virtual machine -Finally, use the VM that you created earlier to connect to the SQL Server instance across the private endpoint. +To verify the static IP address and the functionality of the private endpoint, a test virtual machine connected to your virtual network is required. -1. Sign in to the [Azure portal](https://portal.azure.com). - -1. On the left pane, select **Resource groups**. +Create the virtual machine with **[az vm create](/cli/azure/vm#az-vm-create)**. -1. Select **CreatePrivateEndpointQS-rg**. +```azurecli-interactive +az vm create \ + --resource-group CreatePrivateEndpointQS-rg \ + --name myVM \ + --image Win2019Datacenter \ + --public-ip-address "" \ + --vnet-name myVNet \ + --subnet myBackendSubnet \ + --admin-username azureuser +``` -1. Select **myVM**. +[!INCLUDE [ephemeral-ip-note.md](../../includes/ephemeral-ip-note.md)] + +## Test connectivity with the private endpoint + +Use the VM you created in the previous step to connect to the webapp across the private endpoint. + +1. Sign in to the [Azure portal](https://portal.azure.com). + +2. In the search box at the top of the portal, enter **Virtual machine**. Select **Virtual machines**. -1. On the overview page for **myVM**, select **Connect**, and then select **Bastion**. +3. Select **myVM**. -1. Select the blue **Use Bastion** button. +4. On the overview page for **myVM**, select **Connect**, and then select **Bastion**. -1. Enter the username and password that you used when you created the VM. +5. Enter the username and password that you used when you created the VM. Select **Connect**. -1. After you've connected, open PowerShell on the server. +6. After you've connected, open PowerShell on the server. -1. Enter `nslookup .azurewebsites.net`, replacing *\* with the name of the web app that you created earlier. You'll receive a message that's similar to the following: +7. Enter `nslookup mywebapp1979.azurewebsites.net`. Replace **mywebapp1979** with the name of the web app that you created earlier. 
You'll receive a message that's similar to the following example: ```powershell Server: UnKnown Address: 168.63.129.16 Non-authoritative answer: - Name: mywebapp8675.privatelink.azurewebsites.net - Address: 10.0.0.5 - Aliases: mywebapp8675.azurewebsites.net + Name: mywebapp1979.privatelink.azurewebsites.net + Address: 10.0.0.10 + Aliases: mywebapp1979.azurewebsites.net ``` - A private IP address of *10.0.0.5* is returned for the web app name. This address is in the subnet of the virtual network that you created earlier. +8. In the bastion connection to **myVM**, open the web browser. -1. In the bastion connection to *myVM**, open your web browser. - -1. Enter the URL of your web app, *https://\.azurewebsites.net*. +9. Enter the URL of your web app, **https://mywebapp1979.azurewebsites.net**. If your web app hasn't been deployed, you'll get the following default web app page: - :::image type="content" source="./media/create-private-endpoint-portal/web-app-default-page.png" alt-text="Screenshot of the default web app page on a browser." border="true"::: - -1. Close the connection to *myVM*. - -## Clean up resources - -When you're done using the private endpoint and the VM, use [az group delete](/cli/azure/group#az-group-delete) to remove the resource group and all the resources within it: - -```azurecli-interactive -az group delete \ - --name CreatePrivateEndpointQS-rg -``` - -## What you've learned - -In this quickstart, you created: - -* A virtual network and bastion host -* A virtual machine -* A private endpoint for an Azure web app + :::image type="content" source="./media/create-private-endpoint-portal/web-app-default-page.png" alt-text="Screenshot of the default web app page on a browser." border="true"::: -You used the VM to securely test connectivity to the web app across the private endpoint. +10. Close the connection to **myVM**. ## Next steps diff --git a/articles/private-link/create-private-endpoint-powershell.md b/articles/private-link/create-private-endpoint-powershell.md index 4a56f038f0c7..d2209c4e86c0 100644 --- a/articles/private-link/create-private-endpoint-powershell.md +++ b/articles/private-link/create-private-endpoint-powershell.md @@ -5,7 +5,7 @@ services: private-link author: asudbring ms.service: private-link ms.topic: quickstart -ms.date: 04/22/2022 +ms.date: 05/24/2022 ms.author: allensu ms.custom: devx-track-azurepowershell, mode-api #Customer intent: As someone who has a basic network background but is new to Azure, I want to create a private endpoint by using Azure PowerShell. @@ -16,17 +16,17 @@ Get started with Azure Private Link by using a private endpoint to connect secur In this quickstart, you'll create a private endpoint for an Azure web app and then create and deploy a virtual machine (VM) to test the private connection. -You can create private endpoints for a variety of Azure services, such as Azure SQL and Azure Storage. +You can create private endpoints for various Azure services, such as Azure SQL and Azure Storage. ## Prerequisites -* An Azure account with an active subscription. If you don't already have an Azure account, [create an account for free](https://azure.microsoft.com/free/?WT.mc_id=A261C142F). +- An Azure account with an active subscription. If you don't already have an Azure account, [create an account for free](https://azure.microsoft.com/free/?WT.mc_id=A261C142F). -* An Azure web app with a *PremiumV2-tier* or higher app service plan, deployed in your Azure subscription. 
+- An Azure web app with a *PremiumV2-tier* or higher app service plan, deployed in your Azure subscription. - For more information and an example, see [Quickstart: Create an ASP.NET Core web app in Azure](../app-service/quickstart-dotnetcore.md). + - For more information and an example, see [Quickstart: Create an ASP.NET Core web app in Azure](../app-service/quickstart-dotnetcore.md). - For a detailed tutorial on creating a web app and an endpoint, see [Tutorial: Connect to a web app by using a private endpoint](tutorial-private-endpoint-webapp-portal.md). + - The example webapp in this article is named **myWebApp1979**. Replace the example with your webapp name. If you choose to install and use PowerShell locally, this article requires the Azure PowerShell module version 5.4.1 or later. To find the installed version, run `Get-Module -ListAvailable Az`. If you need to upgrade, see [Install the Azure PowerShell module](/powershell/azure/install-Az-ps). If you're running PowerShell locally, you also need to run `Connect-AzAccount` to create a connection with Azure. @@ -42,304 +42,287 @@ New-AzResourceGroup -Name 'CreatePrivateEndpointQS-rg' -Location 'eastus' ## Create a virtual network and bastion host -First, you'll create a virtual network, subnet, and bastion host. +A virtual network and subnet are required to host the private IP address for the private endpoint. You'll create a bastion host to connect securely to the virtual machine to test the private endpoint. You'll create the virtual machine in a later section. -You'll use the bastion host to connect securely to the VM for testing the private endpoint. +In this section, you'll: -1. Create a virtual network and bastion host with: +- Create a virtual network with [New-AzVirtualNetwork](/powershell/module/az.network/new-azvirtualnetwork) - * [New-AzVirtualNetwork](/powershell/module/az.network/new-azvirtualnetwork) - * [New-AzPublicIpAddress](/powershell/module/az.network/new-azpublicipaddress) - * [New-AzBastion](/powershell/module/az.network/new-azbastion) +- Create subnet configurations for the backend subnet and the bastion subnet with [New-AzVirtualNetworkSubnetConfig](/powershell/module/az.network/new-azvirtualnetworksubnetconfig) -1. Configure the back-end subnet. +- Create a public IP address for the bastion host with [New-AzPublicIpAddress](/powershell/module/az.network/new-azpublicipaddress) - ```azurepowershell-interactive - $subnetConfig = New-AzVirtualNetworkSubnetConfig -Name myBackendSubnet -AddressPrefix 10.0.0.0/24 - ``` +- Create the bastion host with [New-AzBastion](/powershell/module/az.network/new-azbastion) -1. Create the Azure Bastion subnet: - - ```azurepowershell-interactive - $bastsubnetConfig = New-AzVirtualNetworkSubnetConfig -Name AzureBastionSubnet -AddressPrefix 10.0.1.0/24 - ``` - -1. Create the virtual network: - - ```azurepowershell-interactive - $parameters1 = @{ - Name = 'MyVNet' - ResourceGroupName = 'CreatePrivateEndpointQS-rg' - Location = 'eastus' - AddressPrefix = '10.0.0.0/16' - Subnet = $subnetConfig, $bastsubnetConfig - } - $vnet = New-AzVirtualNetwork @parameters1 - ``` - -1. Create the public IP address for the bastion host: - - ```azurepowershell-interactive - $parameters2 = @{ - Name = 'myBastionIP' - ResourceGroupName = 'CreatePrivateEndpointQS-rg' - Location = 'eastus' - Sku = 'Standard' - AllocationMethod = 'Static' - } - $publicip = New-AzPublicIpAddress @parameters2 - ``` - -1.
Create the bastion host: - - ```azurepowershell-interactive - $parameters3 = @{ - ResourceGroupName = 'CreatePrivateEndpointQS-rg' - Name = 'myBastion' - PublicIpAddress = $publicip - VirtualNetwork = $vnet - } - New-AzBastion @parameters3 - ``` - -It can take a few minutes for the Azure Bastion host to deploy. - -## Create a test virtual machine - -Next, create a VM that you can use to test the private endpoint. - -1. Create the VM by using: - - * [Get-Credential](/powershell/module/microsoft.powershell.security/get-credential) - * [New-AzNetworkInterface](/powershell/module/az.network/new-aznetworkinterface) - * [New-AzVM](/powershell/module/az.compute/new-azvm) - * [New-AzVMConfig](/powershell/module/az.compute/new-azvmconfig) - * [Set-AzVMOperatingSystem](/powershell/module/az.compute/set-azvmoperatingsystem) - * [Set-AzVMSourceImage](/powershell/module/az.compute/set-azvmsourceimage) - * [Add-AzVMNetworkInterface](/powershell/module/az.compute/add-azvmnetworkinterface) +```azurepowershell-interactive +## Configure the back-end subnet. ## +$subnetConfig = New-AzVirtualNetworkSubnetConfig -Name myBackendSubnet -AddressPrefix 10.0.0.0/24 + +## Create the Azure Bastion subnet. ## +$bastsubnetConfig = New-AzVirtualNetworkSubnetConfig -Name AzureBastionSubnet -AddressPrefix 10.0.1.0/24 + +## Create the virtual network. ## +$net = @{ + Name = 'MyVNet' + ResourceGroupName = 'CreatePrivateEndpointQS-rg' + Location = 'eastus' + AddressPrefix = '10.0.0.0/16' + Subnet = $subnetConfig, $bastsubnetConfig +} +$vnet = New-AzVirtualNetwork @net + +## Create the public IP address for the bastion host. ## +$ip = @{ + Name = 'myBastionIP' + ResourceGroupName = 'CreatePrivateEndpointQS-rg' + Location = 'eastus' + Sku = 'Standard' + AllocationMethod = 'Static' + Zone = 1,2,3 +} +$publicip = New-AzPublicIpAddress @ip + +## Create the bastion host. ## +$bastion = @{ + ResourceGroupName = 'CreatePrivateEndpointQS-rg' + Name = 'myBastion' + PublicIpAddress = $publicip + VirtualNetwork = $vnet +} +New-AzBastion @bastion -AsJob +``` +## Create a private endpoint -1. Get the server admin credentials and password: +An Azure service that supports private endpoints is required to set up the private endpoint and connection to the virtual network. For the examples in this article, we're using an Azure WebApp from the prerequisites. For more information on the Azure services that support a private endpoint, see [Azure Private Link availability](availability.md). - ```azurepowershell-interactive - $cred = Get-Credential - ``` +A private endpoint can have a static or dynamically assigned IP address. -1. Get the virtual network configuration: +> [!IMPORTANT] +> You must have a previously deployed Azure WebApp to proceed with the steps in this article. For more information, see [Prerequisites](#prerequisites). - ```azurepowershell-interactive - $vnet = Get-AzVirtualNetwork -Name myVNet -ResourceGroupName CreatePrivateEndpointQS-rg - ``` +In this section, you'll: -1. Create a network interface for the VM: +- Create a private link service connection with [New-AzPrivateLinkServiceConnection](/powershell/module/az.network/new-azprivatelinkserviceconnection). - ```azurepowershell-interactive - $parameters1 = @{ - Name = 'myNicVM' - ResourceGroupName = 'CreatePrivateEndpointQS-rg' - Location = 'eastus' - Subnet = $vnet.Subnets[0] - } - $nicVM = New-AzNetworkInterface @parameters1 - ``` +- Create the private endpoint with [New-AzPrivateEndpoint](/powershell/module/az.network/new-azprivateendpoint). -1. 
Configure the VM: - - ```azurepowershell-interactive - $parameters2 = @{ - VMName = 'myVM' - VMSize = 'Standard_DS1_v2' - } - $parameters3 = @{ - ComputerName = 'myVM' - Credential = $cred - } - $parameters4 = @{ - PublisherName = 'MicrosoftWindowsServer' - Offer = 'WindowsServer' - Skus = '2019-Datacenter' - Version = 'latest' - } - $vmConfig = - New-AzVMConfig @parameters2 | Set-AzVMOperatingSystem -Windows @parameters3 | Set-AzVMSourceImage @parameters4 | Add-AzVMNetworkInterface -Id $nicVM.Id - ``` +- Optionally create the private endpoint static IP configuration with [New-AzPrivateEndpointIpConfiguration](/powershell/module/az.network/new-azprivateendpointipconfiguration). -1. Create the VM: +# [**Dynamic IP**](#tab/dynamic-ip) - ```azurepowershell-interactive - New-AzVM -ResourceGroupName 'CreatePrivateEndpointQS-rg' -Location 'eastus' -VM $vmConfig - ``` - -[!INCLUDE [ephemeral-ip-note.md](../../includes/ephemeral-ip-note.md)] +```azurepowershell-interactive +## Place the previously created webapp into a variable. ## +$webapp = Get-AzWebApp -ResourceGroupName CreatePrivateEndpointQS-rg -Name myWebApp1979 + +## Create the private endpoint connection. ## +$pec = @{ + Name = 'myConnection' + PrivateLinkServiceId = $webapp.ID + GroupID = 'sites' +} +$privateEndpointConnection = New-AzPrivateLinkServiceConnection @pec + +## Place the virtual network you created previously into a variable. ## +$vnet = Get-AzVirtualNetwork -ResourceGroupName 'CreatePrivateEndpointQS-rg' -Name 'myVNet' + +## Create the private endpoint. ## +$pe = @{ + ResourceGroupName = 'CreatePrivateEndpointQS-rg' + Name = 'myPrivateEndpoint' + Location = 'eastus' + Subnet = $vnet.Subnets[0] + PrivateLinkServiceConnection = $privateEndpointConnection +} +New-AzPrivateEndpoint @pe -## Create a private endpoint +``` -1. Create a private endpoint and connection by using: +# [**Static IP**](#tab/static-ip) - * [New-AzPrivateLinkServiceConnection](/powershell/module/az.network/New-AzPrivateLinkServiceConnection) - * [New-AzPrivateEndpoint](/powershell/module/az.network/new-azprivateendpoint) +```azurepowershell-interactive +## Place the previously created webapp into a variable. ## +$webapp = Get-AzWebApp -ResourceGroupName CreatePrivateEndpointQS-rg -Name myWebApp1979 + +## Create the private endpoint connection. ## +$pec = @{ + Name = 'myConnection' + PrivateLinkServiceId = $webapp.ID + GroupID = 'sites' +} +$privateEndpointConnection = New-AzPrivateLinkServiceConnection @pec + +## Place the virtual network you created previously into a variable. ## +$vnet = Get-AzVirtualNetwork -ResourceGroupName 'CreatePrivateEndpointQS-rg' -Name 'myVNet' + +## Create the static IP configuration. ## +$ip = @{ + Name = 'myIPconfig' + GroupId = 'sites' + MemberName = 'sites' + PrivateIPAddress = '10.0.0.10' +} +$ipconfig = New-AzPrivateEndpointIpConfiguration @ip + +## Create the private endpoint. ## +$pe = @{ + ResourceGroupName = 'CreatePrivateEndpointQS-rg' + Name = 'myPrivateEndpoint' + Location = 'eastus' + Subnet = $vnet.Subnets[0] + PrivateLinkServiceConnection = $privateEndpointConnection + IpConfiguration = $ipconfig +} +New-AzPrivateEndpoint @pe -1. Place the web app into a variable. Replace \ with the resource group name of your web app, and replace \ with your web app name. +``` - ```azurepowershell-interactive - $webapp = Get-AzWebApp -ResourceGroupName -Name - ``` +--- -1. 
Create the private endpoint connection: +## Configure the private DNS zone - ```azurepowershell-interactive - $parameters1 = @{ - Name = 'myConnection' - PrivateLinkServiceId = $webapp.ID - GroupID = 'sites' - } - $privateEndpointConnection = New-AzPrivateLinkServiceConnection @parameters1 - ``` +A private DNS zone is used to resolve the DNS name of the private endpoint in the virtual network. For this example, we're using the DNS information for an Azure WebApp, for more information on the DNS configuration of private endpoints, see [Azure Private Endpoint DNS configuration](private-endpoint-dns.md). -1. Place the virtual network into a variable: +In this section, you'll: - ```azurepowershell-interactive - $vnet = Get-AzVirtualNetwork -ResourceGroupName 'CreatePrivateEndpointQS-rg' -Name 'myVNet' - ``` +- Create a new private Azure DNS zone with [New-AzPrivateDnsZone](/powershell/module/az.privatedns/new-azprivatednszone) -1. Disable the private endpoint network policy: +- Link the DNS zone to the virtual network you created previously with [New-AzPrivateDnsVirtualNetworkLink](/powershell/module/az.privatedns/new-azprivatednsvirtualnetworklink) - ```azurepowershell-interactive - $vnet.Subnets[0].PrivateEndpointNetworkPolicies = "Disabled" - $vnet | Set-AzVirtualNetwork - ``` +- Create a DNS zone configuration with [New-AzPrivateDnsZoneConfig](/powershell/module/az.network/new-azprivatednszoneconfig) -1. Create the private endpoint: - - ```azurepowershell-interactive - $parameters2 = @{ - ResourceGroupName = 'CreatePrivateEndpointQS-rg' - Name = 'myPrivateEndpoint' - Location = 'eastus' - Subnet = $vnet.Subnets[0] - PrivateLinkServiceConnection = $privateEndpointConnection - } - New-AzPrivateEndpoint @parameters2 - ``` -## Configure the private DNS zone +- Create a DNS zone group with [New-AzPrivateDnsZoneGroup](/powershell/module/az.network/new-azprivatednszonegroup) -1. Create and configure the private DNS zone by using: +```azurepowershell-interactive +## Place the virtual network into a variable. ## +$vnet = Get-AzVirtualNetwork -ResourceGroupName 'CreatePrivateEndpointQS-rg' -Name 'myVNet' + +## Create the private DNS zone. ## +$zn = @{ + ResourceGroupName = 'CreatePrivateEndpointQS-rg' + Name = 'privatelink.azurewebsites.net' +} +$zone = New-AzPrivateDnsZone @zn + +## Create a DNS network link. ## +$lk = @{ + ResourceGroupName = 'CreatePrivateEndpointQS-rg' + ZoneName = 'privatelink.azurewebsites.net' + Name = 'myLink' + VirtualNetworkId = $vnet.Id +} +$link = New-AzPrivateDnsVirtualNetworkLink @lk + +## Configure the DNS zone. ## +$cg = @{ + Name = 'privatelink.azurewebsites.net' + PrivateDnsZoneId = $zone.ResourceId +} +$config = New-AzPrivateDnsZoneConfig @cg + +## Create the DNS zone group. ## +$zg = @{ + ResourceGroupName = 'CreatePrivateEndpointQS-rg' + PrivateEndpointName = 'myPrivateEndpoint' + Name = 'myZoneGroup' + PrivateDnsZoneConfig = $config +} +New-AzPrivateDnsZoneGroup @zg - * [New-AzPrivateDnsZone](/powershell/module/az.privatedns/new-azprivatednszone) - * [New-AzPrivateDnsVirtualNetworkLink](/powershell/module/az.privatedns/new-azprivatednsvirtualnetworklink) - * [New-AzPrivateDnsZoneConfig](/powershell/module/az.network/new-azprivatednszoneconfig) - * [New-AzPrivateDnsZoneGroup](/powershell/module/az.network/new-azprivatednszonegroup) +``` -1. 
Place the virtual network into a variable: +## Create a test virtual machine - ```azurepowershell-interactive - $vnet = Get-AzVirtualNetwork -ResourceGroupName 'CreatePrivateEndpointQS-rg' -Name 'myVNet' - ``` +To verify the static IP address and the functionality of the private endpoint, a test virtual machine connected to your virtual network is required. -1. Create the private DNS zone: +In this section, you'll: - ```azurepowershell-interactive - $parameters1 = @{ - ResourceGroupName = 'CreatePrivateEndpointQS-rg' - Name = 'privatelink.azurewebsites.net' - } - $zone = New-AzPrivateDnsZone @parameters1 - ``` +- Create a sign-in credential for the virtual machine with [Get-Credential](/powershell/module/microsoft.powershell.security/get-credential) -1. Create a DNS network link: +- Create a network interface for the virtual machine with [New-AzNetworkInterface](/powershell/module/az.network/new-aznetworkinterface) - ```azurepowershell-interactive - $parameters2 = @{ - ResourceGroupName = 'CreatePrivateEndpointQS-rg' - ZoneName = 'privatelink.azurewebsites.net' - Name = 'myLink' - VirtualNetworkId = $vnet.Id - } - $link = New-AzPrivateDnsVirtualNetworkLink @parameters2 - ``` +- Create a virtual machine configuration with [New-AzVMConfig](/powershell/module/az.compute/new-azvmconfig), [Set-AzVMOperatingSystem](/powershell/module/az.compute/set-azvmoperatingsystem), [Set-AzVMSourceImage](/powershell/module/az.compute/set-azvmsourceimage), and [Add-AzVMNetworkInterface](/powershell/module/az.compute/add-azvmnetworkinterface) -1. Configure the DNS zone: +- Create the virtual machine with [New-AzVM](/powershell/module/az.compute/new-azvm) - ```azurepowershell-interactive - $parameters3 = @{ - Name = 'privatelink.azurewebsites.net' - PrivateDnsZoneId = $zone.ResourceId - } - $config = New-AzPrivateDnsZoneConfig @parameters3 - ``` +```azurepowershell-interactive +## Create the credential for the virtual machine. Enter a username and password at the prompt. ## +$cred = Get-Credential + +## Place the virtual network into a variable. ## +$vnet = Get-AzVirtualNetwork -Name myVNet -ResourceGroupName CreatePrivateEndpointQS-rg + +## Create a network interface for the virtual machine. ## +$nic = @{ + Name = 'myNicVM' + ResourceGroupName = 'CreatePrivateEndpointQS-rg' + Location = 'eastus' + Subnet = $vnet.Subnets[0] +} +$nicVM = New-AzNetworkInterface @nic + +## Create the configuration for the virtual machine. ## +$vm1 = @{ + VMName = 'myVM' + VMSize = 'Standard_DS1_v2' +} +$vm2 = @{ + ComputerName = 'myVM' + Credential = $cred +} +$vm3 = @{ + PublisherName = 'MicrosoftWindowsServer' + Offer = 'WindowsServer' + Skus = '2019-Datacenter' + Version = 'latest' +} +$vmConfig = +New-AzVMConfig @vm1 | Set-AzVMOperatingSystem -Windows @vm2 | Set-AzVMSourceImage @vm3 | Add-AzVMNetworkInterface -Id $nicVM.Id + +## Create the virtual machine. ## +New-AzVM -ResourceGroupName 'CreatePrivateEndpointQS-rg' -Location 'eastus' -VM $vmConfig -1. Create the DNS zone group: +``` - ```azurepowershell-interactive - $parameters4 = @{ - ResourceGroupName = 'CreatePrivateEndpointQS-rg' - PrivateEndpointName = 'myPrivateEndpoint' - Name = 'myZoneGroup' - PrivateDnsZoneConfig = $config - } - New-AzPrivateDnsZoneGroup @parameters4 - ``` +[!INCLUDE [ephemeral-ip-note.md](../../includes/ephemeral-ip-note.md)] ## Test connectivity with the private endpoint -Finally, use the VM you created in the previous step to connect to the SQL server across the private endpoint. 
+Use the VM you created in the previous step to connect to the webapp across the private endpoint. 1. Sign in to the [Azure portal](https://portal.azure.com). -1. On the left pane, select **Resource groups**. +2. In the search box at the top of the portal, enter **Virtual machine**. Select **Virtual machines**. -1. Select **CreatePrivateEndpointQS-rg**. +3. Select **myVM**. -1. Select **myVM**. +4. On the overview page for **myVM**, select **Connect**, and then select **Bastion**. -1. On the overview page for **myVM**, select **Connect**, and then select **Bastion**. +5. Enter the username and password that you used when you created the VM. Select **Connect**. -1. Select the blue **Use Bastion** button. +6. After you've connected, open PowerShell on the server. -1. Enter the username and password that you used when you created the VM. - -1. After you've connected, open PowerShell on the server. - -1. Enter `nslookup .azurewebsites.net`. Replace **\** with the name of the web app that you created earlier. You'll receive a message that's similar to the following: +7. Enter `nslookup mywebapp1979.azurewebsites.net`. Replace **mywebapp1979** with the name of the web app that you created earlier. You'll receive a message that's similar to the following example: ```powershell Server: UnKnown Address: 168.63.129.16 Non-authoritative answer: - Name: mywebapp8675.privatelink.azurewebsites.net - Address: 10.0.0.5 - Aliases: mywebapp8675.azurewebsites.net + Name: mywebapp1979.privatelink.azurewebsites.net + Address: 10.0.0.10 + Aliases: mywebapp1979.azurewebsites.net ``` - A private IP address of *10.0.0.5* is returned for the web app name. This address is in the subnet of the virtual network that you created earlier. +8. In the bastion connection to **myVM**, open the web browser. -1. In the bastion connection to **myVM**, open your web browser. - -1. Enter the URL of your web app, **https://\.azurewebsites.net**. +9. Enter the URL of your web app, **https://mywebapp1979.azurewebsites.net**. If your web app hasn't been deployed, you'll get the following default web app page: - :::image type="content" source="./media/create-private-endpoint-portal/web-app-default-page.png" alt-text="Screenshot of the default web app page on a browser." border="true"::: - -1. Close the connection to **myVM**. - -## Clean up resources -When you're done using the private endpoint and the VM, use [Remove-AzResourceGroup](/powershell/module/az.resources/remove-azresourcegroup) to remove the resource group and all the resources within it: - -```azurepowershell-interactive -Remove-AzResourceGroup -Name CreatePrivateEndpointQS-rg -Force -``` - -## What you've learned - -In this quickstart, you created: - -* A virtual network and bastion host -* A virtual machine -* A private endpoint for an Azure web app + :::image type="content" source="./media/create-private-endpoint-portal/web-app-default-page.png" alt-text="Screenshot of the default web app page on a browser." border="true"::: -You used the VM to securely test connectivity to the web app across the private endpoint. +10. Close the connection to **myVM**. 
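+
+While you're connected to **myVM**, you can also run the same check with the `Resolve-DnsName` cmdlet instead of `nslookup`. This is a minimal sketch that assumes the example web app name **mywebapp1979** used in this quickstart; replace it with the name of your web app. The result should include the **privatelink** alias and the private IP address of the endpoint (*10.0.0.10* in the static IP example).
+
+```powershell
+## Resolve the web app name from inside the virtual network. ##
+## The answer should list the privatelink.azurewebsites.net alias and the private IP address. ##
+Resolve-DnsName -Name 'mywebapp1979.azurewebsites.net' -Type A
+```
+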
## Next steps diff --git a/articles/private-link/manage-private-endpoint.md b/articles/private-link/manage-private-endpoint.md index 27238a3346ed..a704002223ca 100644 --- a/articles/private-link/manage-private-endpoint.md +++ b/articles/private-link/manage-private-endpoint.md @@ -1,46 +1,170 @@ --- -title: Manage a Private Endpoint connection in Azure +title: Manage Azure Private Endpoints titleSuffix: Azure Private Link -description: Learn how to manage private endpoint connections in Azure +description: Learn how to manage private endpoints in Azure services: private-link author: asudbring ms.service: private-link ms.topic: how-to -ms.date: 10/04/2021 +ms.date: 05/17/2022 ms.author: allensu ms.custom: devx-track-azurepowershell --- -# Manage a Private Endpoint connection +# Manage Azure Private Endpoints + +Azure Private Endpoints have several options when managing the configuration and their deployment. + +**GroupId** and **MemberName** can be determined by querying the Private Link resource. The **GroupID** and **MemberName** values are needed to configure a static IP address for a private endpoint during creation. + +A private endpoint has two custom properties, static IP address and the network interface name. These properties must be set when the private endpoint is created. + +With a service provider and consumer deployment of a Private Link Service, an approval process is in place to make the connection. + +## Determine GroupID and MemberName + +During the creation of a private endpoint with Azure PowerShell and Azure CLI, the **GroupId** and **MemberName** of the private endpoint resource might be needed. + +* **GroupId** is the subresource of the private endpoint. + +* **MemberName** is the unique stamp for the private IP address of the endpoint. + +For more information about Private Endpoint subresources and their values, see [Private-link resource](private-endpoint-overview.md#private-link-resource). + +To determine the values of **GroupID** and **MemberName** for your private endpoint resource, use the following commands. **MemberName** is contained within the **RequiredMembers** property. + +# [**PowerShell**](#tab/manage-private-link-powershell) + +An Azure WebApp is used as the example private endpoint resource. Use **[Get-AzPrivateLinkResource](/powershell/module/az.network/get-azprivatelinkresource)** to determine **GroupId** and **MemberName**. + +```azurepowershell +## Place the previously created webapp into a variable. ## +$webapp = +Get-AzWebApp -ResourceGroupName myResourceGroup -Name myWebApp1979 + +$resource = +Get-AzPrivateLinkResource -PrivateLinkResourceId $webapp.ID +``` + +You should receive an output similar to the below example. + +:::image type="content" source="./media/manage-private-endpoint/powershell-output.png" alt-text="Screenshot of the PowerShell output of command."::: + +# [**Azure CLI**](#tab/manage-private-link-cli) + +An Azure WebApp is used as the example private endpoint resource. Use **[az network private-link-resource list](/cli/azure/network/private-link-resource#az-network-private-link-resource-list)** to determine **GroupId** and **MemberName**. The parameter `--type` requires the namespace for the private link resource. For the webapp used in this example, the namespace is **Microsoft.Web/sites**. To determine the namespace for your private link resource, see **[Azure services DNS zone configuration](private-endpoint-dns.md#azure-services-dns-zone-configuration)**. 
+ +```azurecli +az network private-link-resource list \ + --resource-group MyResourceGroup \ + --name myWebApp1979 \ + --type Microsoft.Web/sites +``` + +You should receive an output similar to the below example. + +:::image type="content" source="./media/manage-private-endpoint/cli-output.png" alt-text="Screenshot of the Azure CLI output of command."::: + +--- + +## Custom properties + +Network interface rename and static IP address assignment are custom properties that can be set on a private endpoint when it's created. + +### Network interface rename + +By default, when a private endpoint is created, the network interface associated with the private endpoint is given a random name. A custom name for the network interface must be assigned when the private endpoint is created. The renaming of the network interface of an existing private endpoint is unsupported. + +Use the following commands when creating a private endpoint to rename the network interface. + +# [**PowerShell**](#tab/manage-private-link-powershell) + +To rename the network interface when the private endpoint is created, use the `-CustomNetworkInterfaceName` parameter. The following example uses an Azure PowerShell command to create a private endpoint to an Azure WebApp. For more information, see **[New-AzPrivateEndpoint](/powershell/module/az.network/new-azprivateendpoint)**. + +```azurepowershell +## Place the previously created webapp into a variable. ## +$webapp = Get-AzWebApp -ResourceGroupName myResourceGroup -Name myWebApp1979 + +## Create the private endpoint connection. ## +$pec = @{ + Name = 'myConnection' + PrivateLinkServiceId = $webapp.ID + GroupID = 'sites' +} +$privateEndpointConnection = New-AzPrivateLinkServiceConnection @pec + +## Place the virtual network you created previously into a variable. ## +$vnet = Get-AzVirtualNetwork -ResourceGroupName 'myResourceGroup' -Name 'myVNet' + +## Create the private endpoint. ## +$pe = @{ + ResourceGroupName = 'myResourceGroup' + Name = 'myPrivateEndpoint' + Location = 'eastus' + Subnet = $vnet.Subnets[0] + PrivateLinkServiceConnection = $privateEndpointConnection + CustomNetworkInterfaceName = 'myPrivateEndpointNIC' +} +New-AzPrivateEndpoint @pe + +``` + +# [**Azure CLI**](#tab/manage-private-link-cli) + +To rename the network interface when the private endpoint is created, use the `--nic-name` parameter. The following example uses an Azure CLI command to create a private endpoint to an Azure WebApp. For more information, see **[az network private-endpoint create](/cli/azure/network/private-endpoint#az-network-private-endpoint-create)**. + +```azurecli +id=$(az webapp list \ + --resource-group myResourceGroup \ + --query '[].[id]' \ + --output tsv) + +az network private-endpoint create \ + --connection-name myConnection \ + --name myPrivateEndpoint \ + --private-connection-resource-id $id \ + --resource-group myResourceGroup \ + --subnet myBackendSubnet \ + --group-id sites \ + --nic-name myPrivateEndpointNIC \ + --vnet-name myVNet +``` + +--- + +### Static IP address + +By default, when a private endpoint is created, the IP address for the endpoint is automatically assigned. The IP is assigned from the IP range of the virtual network configured for the private endpoint. A situation may arise when a static IP address for the private endpoint is required. The static IP address must be assigned when the private endpoint is created. The configuration of a static IP address for an existing private endpoint is currently unsupported.
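+
+The following is a minimal Azure PowerShell sketch of assigning a static IP address when the private endpoint is created. It assumes the example web app **myWebApp1979**, resource group **myResourceGroup**, and virtual network **myVNet** used elsewhere in this article, and it reuses the **GroupId** and **MemberName** values determined earlier. The static address (**10.0.0.10** in this sketch) must come from the subnet that hosts the private endpoint.
+
+```azurepowershell
+## Place the previously created webapp and virtual network into variables. ##
+$webapp = Get-AzWebApp -ResourceGroupName 'myResourceGroup' -Name 'myWebApp1979'
+$vnet = Get-AzVirtualNetwork -ResourceGroupName 'myResourceGroup' -Name 'myVNet'
+
+## Create the private endpoint connection. ##
+$pec = @{
+    Name = 'myConnection'
+    PrivateLinkServiceId = $webapp.ID
+    GroupID = 'sites'
+}
+$privateEndpointConnection = New-AzPrivateLinkServiceConnection @pec
+
+## Create the static IP configuration. GroupId and MemberName are determined as shown earlier in this article. ##
+$ip = @{
+    Name = 'myIPconfig'
+    GroupId = 'sites'
+    MemberName = 'sites'
+    PrivateIPAddress = '10.0.0.10'
+}
+$ipconfig = New-AzPrivateEndpointIpConfiguration @ip
+
+## Create the private endpoint with the static IP configuration. ##
+$pe = @{
+    ResourceGroupName = 'myResourceGroup'
+    Name = 'myPrivateEndpoint'
+    Location = 'eastus'
+    Subnet = $vnet.Subnets[0]
+    PrivateLinkServiceConnection = $privateEndpointConnection
+    IpConfiguration = $ipconfig
+}
+New-AzPrivateEndpoint @pe
+```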
+ +For procedures to configure a static IP address when creating a private endpoint, see [Create a private endpoint using Azure PowerShell](create-private-endpoint-powershell.md) and [Create a private endpoint using the Azure CLI](create-private-endpoint-cli.md). + +## Private endpoint connections Azure Private Link works on an approval model where the Private Link service consumer can request a connection to the service provider for consuming the service. The service provider can then decide whether to allow the consumer to connect or not. Azure Private Link enables service providers to manage the private endpoint connection on their resources. -This article provides instructions about how to manage the Private Endpoint connections. - -![Manage Private Endpoints](media/manage-private-endpoint/manage-private-endpoint.png) +:::image type="content" source="./media/manage-private-endpoint/manage-private-endpoint.png" alt-text="Diagram of Private Link approval methods."::: There are two connection approval methods that a Private Link service consumer can choose from: - **Automatic**: If the service consumer has Azure Role Based Access Control permissions on the service provider resource, the consumer can choose the automatic approval method. When the request reaches the service provider resource, no action is required from the service provider and the connection is automatically approved. - **Manual**: If the service consumer doesn’t have Azure Role Based Access Control permissions on the service provider resource, the consumer can choose the manual approval method. The connection request appears on the service resources as **Pending**. The service provider has to manually approve the request before connections can be established. -In manual cases, service consumer can also specify a message with the request to provide more context to the service provider. The service provider has following options to choose from for all Private Endpoint connections: **Approve**, **Reject**, **Remove**. - -The below table shows the various service provider actions and the resulting connection states for Private Endpoints. The service provider can change the connection state at a later time without consumer intervention. The action will update the state of the endpoint on the consumer side. +In manual cases, service consumer can also specify a message with the request to provide more context to the service provider. The service provider has following options to choose from for all private endpoint connections: **Approve**, **Reject**, **Remove**. +The below table shows the various service provider actions and the resulting connection states for private endpoints. The service provider can change the connection state at a later time without consumer intervention. The action will update the state of the endpoint on the consumer side. -| Service Provider Action | Service Consumer Private Endpoint State | Description | +| Service provider action | Service consumer private endpoint state | Description | |---------|---------|---------| | None | Pending | Connection is created manually and is pending for approval by the Private Link resource owner. | | Approve | Approved | Connection was automatically or manually approved and is ready to be used. | | Reject | Rejected | Connection was rejected by the private link resource owner. | | Remove | Disconnected | Connection was removed by the private link resource owner, the private endpoint becomes informative and should be deleted for clean-up. 
| -## Manage Private Endpoint connections on Azure PaaS resources +## Manage private endpoint connections on Azure PaaS resources -The Azure portal is the preferred method for managing private endpoint connections on Azure PaaS resources. +Use the following steps to manage a private endpoint connection in the Azure portal. 1. Sign in to the [Azure portal](https://portal.azure.com). @@ -48,67 +172,88 @@ The Azure portal is the preferred method for managing private endpoint connectio 3. In the **Private link center**, select **Private endpoints** or **Private link services**. -4. For each of your endpoints, you can view the number of Private Endpoint connections associated with it. You can filter the resources as needed. +4. For each of your endpoints, you can view the number of private endpoint connections associated with it. You can filter the resources as needed. -5. Select the private endpoint. Under the connections listed, select the connection that you want to manage. +5. Select the private endpoint. Under the connections listed, select the connection that you want to manage. 6. You can change the state of the connection by selecting from the options at the top. ## Manage Private Endpoint connections on a customer/partner owned Private Link service -Azure PowerShell and Azure CLI are the preferred methods for managing Private Endpoint connections on Microsoft Partner Services or customer owned services. +Use the following PowerShell and Azure CLI commands to manage private endpoint connections on Microsoft Partner Services or customer owned services. -### PowerShell - +# [**PowerShell**](#tab/manage-private-link-powershell) + Use the following PowerShell commands to manage private endpoint connections. -#### Get Private Link connection states +## Get Private Link connection states -Use [Get-AzPrivateEndpointConnection](/powershell/module/az.network/get-azprivateendpointconnection) to get the Private Endpoint connections and their states. +Use **[Get-AzPrivateEndpointConnection](/powershell/module/az.network/get-azprivateendpointconnection)** to get the Private Endpoint connections and their states. ```azurepowershell -Get-AzPrivateEndpointConnection -Name myPrivateLinkService -ResourceGroupName myResourceGroup +$get = @{ + Name = 'myPrivateLinkService' + ResourceGroupName = 'myResourceGroup' +} +Get-AzPrivateEndpointConnection @get ``` - -#### Approve a Private Endpoint connection - -Use [Approve-AzPrivateEndpointConnection](/powershell/module/az.network/approve-azprivateendpointconnection) cmdlet to approve a Private Endpoint connection. - + +## Approve a Private Endpoint connection + +Use **[Approve-AzPrivateEndpointConnection](/powershell/module/az.network/approve-azprivateendpointconnection)** cmdlet to approve a Private Endpoint connection. + ```azurepowershell -Approve-AzPrivateEndpointConnection -Name myPrivateEndpointConnection -ResourceGroupName myResourceGroup -ServiceName myPrivateLinkService +$approve = @{ + Name = 'myPrivateEndpointConnection' + ServiceName = 'myPrivateLinkService' + ResourceGroupName = 'myResourceGroup' +} +Approve-AzPrivateEndpointConnection @approve ``` - -#### Deny Private Endpoint connection - -Use [Deny-AzPrivateEndpointConnection](/powershell/module/az.network/deny-azprivateendpointconnection) cmdlet to reject a Private Endpoint connection. + +## Deny Private Endpoint connection + +Use **[Deny-AzPrivateEndpointConnection](/powershell/module/az.network/deny-azprivateendpointconnection)** cmdlet to reject a Private Endpoint connection. 
```azurepowershell -Deny-AzPrivateEndpointConnection -Name myPrivateEndpointConnection -ResourceGroupName myResourceGroup -ServiceName myPrivateLinkService +$deny = @{ + Name = 'myPrivateEndpointConnection' + ServiceName = 'myPrivateLinkService' + ResourceGroupName = 'myResourceGroup' +} +Deny-AzPrivateEndpointConnection @deny ``` -#### Remove Private Endpoint connection - -Use [Remove-AzPrivateEndpointConnection](/powershell/module/az.network/remove-azprivateendpointconnection) cmdlet to remove a Private Endpoint connection. +## Remove Private Endpoint connection + +Use **[Remove-AzPrivateEndpointConnection](/powershell/module/az.network/remove-azprivateendpointconnection)** cmdlet to remove a Private Endpoint connection. ```azurepowershell -Remove-AzPrivateEndpointConnection -Name myPrivateEndpointConnection -ResourceGroupName myResourceGroup -ServiceName myPrivateLinkService +$remove = @{ + Name = 'myPrivateEndpointConnection' + ServiceName = 'myPrivateLinkService' + ResourceGroupName = 'myResourceGroup' +} +Remove-AzPrivateEndpointConnection @remove ``` - -### Azure CLI - -#### Get Private Link connection states -Use [az network private-endpoint-connection show](/cli/azure/network/private-endpoint-connection#az-network-private-endpoint-connection-show) to get the Private Endpoint connections and their states. +# [**Azure CLI**](#tab/manage-private-link-cli) + +Use the following Azure CLI commands to manage private endpoint connections. + +## Get Private Link connection states + +Use **[az network private-endpoint-connection show](/cli/azure/network/private-endpoint-connection#az-network-private-endpoint-connection-show)** to get the Private Endpoint connections and their states. ```azurecli az network private-endpoint-connection show \ --name myPrivateEndpointConnection \ --resource-group myResourceGroup ``` + +## Approve a Private Endpoint connection -#### Approve a Private Endpoint connection - -Use [az network private-endpoint-connection approve](/cli/azure/network/private-endpoint-connection#az-network-private-endpoint-connection-approve) cmdlet to approve a Private Endpoint connection. +Use **[az network private-endpoint-connection approve](/cli/azure/network/private-endpoint-connection#az-network-private-endpoint-connection-approve)** cmdlet to approve a Private Endpoint connection. ```azurecli az network private-endpoint-connection approve \ @@ -116,9 +261,9 @@ Use [az network private-endpoint-connection approve](/cli/azure/network/private- --resource-group myResourceGroup ``` -#### Deny Private Endpoint connection +## Deny Private Endpoint connection -Use [az network private-endpoint-connection reject](/cli/azure/network/private-endpoint-connection#az-network-private-endpoint-connection-reject) cmdlet to reject a Private Endpoint connection. +Use **[az network private-endpoint-connection reject](/cli/azure/network/private-endpoint-connection#az-network-private-endpoint-connection-reject)** cmdlet to reject a Private Endpoint connection. ```azurecli az network private-endpoint-connection reject \ @@ -126,9 +271,9 @@ Use [az network private-endpoint-connection reject](/cli/azure/network/private-e --resource-group myResourceGroup ``` -#### Remove Private Endpoint connection +## Remove Private Endpoint connection -Use [az network private-endpoint-connection delete](/cli/azure/network/private-endpoint-connection#az-network-private-endpoint-connection-delete) cmdlet to remove a Private Endpoint connection. 
+Use **[az network private-endpoint-connection delete](/cli/azure/network/private-endpoint-connection#az-network-private-endpoint-connection-delete)** cmdlet to remove a Private Endpoint connection. ```azurecli az network private-endpoint-connection delete \ @@ -136,6 +281,8 @@ Use [az network private-endpoint-connection delete](/cli/azure/network/private-e --resource-group myResourceGroup ``` +--- + ## Next steps - [Learn about Private Endpoints](private-endpoint-overview.md) diff --git a/articles/private-link/media/create-private-endpoint-portal/web-app-default-page.png b/articles/private-link/media/create-private-endpoint-portal/web-app-default-page.png index 1af8d3d9f34c..7f54289e3e59 100644 Binary files a/articles/private-link/media/create-private-endpoint-portal/web-app-default-page.png and b/articles/private-link/media/create-private-endpoint-portal/web-app-default-page.png differ diff --git a/articles/private-link/media/manage-private-endpoint/cli-output.png b/articles/private-link/media/manage-private-endpoint/cli-output.png new file mode 100644 index 000000000000..15f7817c0693 Binary files /dev/null and b/articles/private-link/media/manage-private-endpoint/cli-output.png differ diff --git a/articles/private-link/media/manage-private-endpoint/powershell-output.png b/articles/private-link/media/manage-private-endpoint/powershell-output.png new file mode 100644 index 000000000000..7b91aa1ce888 Binary files /dev/null and b/articles/private-link/media/manage-private-endpoint/powershell-output.png differ diff --git a/articles/private-link/media/private-endpoint-static-ip-powershell/web-app-default-page.png b/articles/private-link/media/private-endpoint-static-ip-powershell/web-app-default-page.png deleted file mode 100644 index 7f54289e3e59..000000000000 Binary files a/articles/private-link/media/private-endpoint-static-ip-powershell/web-app-default-page.png and /dev/null differ diff --git a/articles/private-link/private-endpoint-static-ip-powershell.md b/articles/private-link/private-endpoint-static-ip-powershell.md deleted file mode 100644 index 6239c9428a68..000000000000 --- a/articles/private-link/private-endpoint-static-ip-powershell.md +++ /dev/null @@ -1,303 +0,0 @@ ---- -title: Create a private endpoint with a static IP address - PowerShell -titleSuffix: Azure Private Link -description: Learn how to create a private endpoint for an Azure service with a static private IP address. -author: asudbring -ms.author: allensu -ms.service: private-link -ms.topic: how-to -ms.date: 05/13/2022 -ms.custom: ---- - -# Create a private endpoint with a static IP address using PowerShell - - A private endpoint IP address is allocated by DHCP in your virtual network by default. In this article, you'll create a private endpoint with a static IP address. - -## Prerequisites - -- An Azure account with an active subscription. If you don't already have an Azure account, [create an account for free](https://azure.microsoft.com/free/?WT.mc_id=A261C142F). - -- An Azure web app with a **PremiumV2-tier** or higher app service plan, deployed in your Azure subscription. - - - For more information and an example, see [Quickstart: Create an ASP.NET Core web app in Azure](../app-service/quickstart-dotnetcore.md). - - - The example webapp in this article is named **myWebApp1979**. Replace the example with your webapp name. - -If you choose to install and use PowerShell locally, this article requires the Azure PowerShell module version 5.4.1 or later. 
To find the installed version, run `Get-Module -ListAvailable Az`. If you need to upgrade, see [Install the Azure PowerShell module](/powershell/azure/install-Az-ps). If you're running PowerShell locally, you also need to run `Connect-AzAccount` to create a connection with Azure. - -## Create a resource group - -An Azure resource group is a logical container where Azure resources are deployed and managed. - -Create a resource group with [New-AzResourceGroup](/powershell/module/az.resources/new-azresourcegroup): - -```azurepowershell-interactive -New-AzResourceGroup -Name 'myResourceGroup' -Location 'eastus' -``` - -## Create a virtual network and bastion host - -A virtual network and subnet is required for to host the private IP address for the private endpoint. You'll create a bastion host to connect securely to the virtual machine to test the private endpoint. You'll create the virtual machine in a later section. - -In this section, you'll: - -- Create a virtual network with [New-AzVirtualNetwork](/powershell/module/az.network/new-azvirtualnetwork) - -- Create subnet configurations for the backend subnet and the bastion subnet with [New-AzVirtualNetworkSubnetConfig](/powershell/module/az.network/new-azvirtualnetworksubnetconfig) - -- Create a public IP address for the bastion host with [New-AzPublicIpAddress](/powershell/module/az.network/new-azpublicipaddress) - -- Create the bastion host with [New-AzBastion](/powershell/module/az.network/new-azbastion) - -```azurepowershell-interactive -## Configure the back-end subnet. ## -$subnetConfig = New-AzVirtualNetworkSubnetConfig -Name myBackendSubnet -AddressPrefix 10.0.0.0/24 - -## Create the Azure Bastion subnet. ## -$bastsubnetConfig = New-AzVirtualNetworkSubnetConfig -Name AzureBastionSubnet -AddressPrefix 10.0.1.0/24 - -## Create the virtual network. ## -$net = @{ - Name = 'MyVNet' - ResourceGroupName = 'myResourceGroup' - Location = 'eastus' - AddressPrefix = '10.0.0.0/16' - Subnet = $subnetConfig, $bastsubnetConfig -} -$vnet = New-AzVirtualNetwork @net - -## Create the public IP address for the bastion host. ## -$ip = @{ - Name = 'myBastionIP' - ResourceGroupName = 'myResourceGroup' - Location = 'eastus' - Sku = 'Standard' - AllocationMethod = 'Static' - Zone = 1,2,3 -} -$publicip = New-AzPublicIpAddress @ip - -## Create the bastion host. ## -$bastion = @{ - ResourceGroupName = 'myResourceGroup' - Name = 'myBastion' - PublicIpAddress = $publicip - VirtualNetwork = $vnet -} -New-AzBastion @bastion -AsJob -``` - -## Create a private endpoint - -An Azure service that supports private endpoints is required to setup the private endpoint and connection to the virtual network. For the examples in this article, we are using an Azure WebApp from the prerequisites. For more information on the Azure services that support a private endpoint, see [Azure Private Link availability](availability.md). - -> [!IMPORTANT] -> You must have a previously deployed Azure WebApp to proceed with the steps in this article. See [Prerequisites](#prerequisites) for more information. - -In this section, you'll: - -- Create a private link service connection with [New-AzPrivateLinkServiceConnection](/powershell/module/az.network/new-azprivatelinkserviceconnection). - -- Create the private endpoint static IP configuration with [New-AzPrivateEndpointIpConfiguration](/powershell/module/az.network/new-azprivateendpointipconfiguration). - -- Create the private endpoint with [New-AzPrivateEndpoint](/powershell/module/az.network/new-azprivateendpoint). 
- -```azurepowershell-interactive -## Place the previously created webapp into a variable. ## -$webapp = Get-AzWebApp -ResourceGroupName myResourceGroup -Name myWebApp1979 - -## Create the private endpoint connection. ## -$pec = @{ - Name = 'myConnection' - PrivateLinkServiceId = $webapp.ID - GroupID = 'sites' -} -$privateEndpointConnection = New-AzPrivateLinkServiceConnection @pec - -## Place the virtual network you created previously into a variable. ## -$vnet = Get-AzVirtualNetwork -ResourceGroupName 'myResourceGroup' -Name 'myVNet' - -## Disable the private endpoint network policy. ## -$vnet.Subnets[0].PrivateEndpointNetworkPolicies = "Disabled" -$vnet | Set-AzVirtualNetwork - -## Create the static IP configuration. ## -$ip = @{ - Name = 'myIPconfig' - GroupId = 'sites' - MemberName = 'sites' - PrivateIPAddress = '10.0.0.10' -} -$ipconfig = New-AzPrivateEndpointIpConfiguration @ip - -## Create the private endpoint. ## -$pe = @{ - ResourceGroupName = 'myResourceGroup' - Name = 'myPrivateEndpoint' - Location = 'eastus' - Subnet = $vnet.Subnets[0] - PrivateLinkServiceConnection = $privateEndpointConnection - IpConfiguration = $ipconfig -} -New-AzPrivateEndpoint @pe - -``` - -## Configure the private DNS zone - -A private DNS zone is used to resolve the DNS name of the private endpoint in the virtual network. For this example, we are using the DNS information for an Azure WebApp, for more information on the DNS configuration of private endpoints, see [Azure Private Endpoint DNS configuration](private-endpoint-dns.md)]. - -In this section, you'll: - -- Create a new private Azure DNS zone with [New-AzPrivateDnsZone](/powershell/module/az.privatedns/new-azprivatednszone) - -- Link the DNS zone to the virtual network you created previously with [New-AzPrivateDnsVirtualNetworkLink](/powershell/module/az.privatedns/new-azprivatednsvirtualnetworklink) - -- Create a DNS zone configuration with [New-AzPrivateDnsZoneConfig](/powershell/module/az.network/new-azprivatednszoneconfig) - -- Create a DNS zone group with [New-AzPrivateDnsZoneGroup](/powershell/module/az.network/new-azprivatednszonegroup) - -```azurepowershell-interactive -## Place the virtual network into a variable. ## -$vnet = Get-AzVirtualNetwork -ResourceGroupName 'myResourceGroup' -Name 'myVNet' - -## Create the private DNS zone. ## -$zn = @{ - ResourceGroupName = 'myResourceGroup' - Name = 'privatelink.azurewebsites.net' -} -$zone = New-AzPrivateDnsZone @zn - -## Create a DNS network link. ## -$lk = @{ - ResourceGroupName = 'myResourceGroup' - ZoneName = 'privatelink.azurewebsites.net' - Name = 'myLink' - VirtualNetworkId = $vnet.Id -} -$link = New-AzPrivateDnsVirtualNetworkLink @lk - -## Configure the DNS zone. ## -$cg = @{ - Name = 'privatelink.azurewebsites.net' - PrivateDnsZoneId = $zone.ResourceId -} -$config = New-AzPrivateDnsZoneConfig @cg - -## Create the DNS zone group. ## -$zg = @{ - ResourceGroupName = 'myResourceGroup' - PrivateEndpointName = 'myPrivateEndpoint' - Name = 'myZoneGroup' - PrivateDnsZoneConfig = $config -} -New-AzPrivateDnsZoneGroup @zg - -``` - -## Create a test virtual machine - -To verify the static IP address and the functionality of the private endpoint, a test virtual machine connected to your virtual network is required. 
- -In this section, you'll: - -- Create a login credential for the virtual machine with [Get-Credential](/powershell/module/microsoft.powershell.security/get-credential) - -- Create a network interface for the virtual machine with [New-AzNetworkInterface](/powershell/module/az.network/new-aznetworkinterface) - -- Create a virtual machine configuration with [New-AzVMConfig](/powershell/module/az.compute/new-azvmconfig), [Set-AzVMOperatingSystem](/powershell/module/az.compute/set-azvmoperatingsystem), [Set-AzVMSourceImage](/powershell/module/az.compute/set-azvmsourceimage), and [Add-AzVMNetworkInterface](/powershell/module/az.compute/add-azvmnetworkinterface) - -- Create the virtual machine with [New-AzVM](/powershell/module/az.compute/new-azvm) - -```azurepowershell-interactive -## Create the credential for the virtual machine. Enter a username and password at the prompt. ## -$cred = Get-Credential - -## Place the virtual network into a variable. ## -$vnet = Get-AzVirtualNetwork -Name myVNet -ResourceGroupName myResourceGroup - -## Create a network interface for the virtual machine. ## -$nic = @{ - Name = 'myNicVM' - ResourceGroupName = 'myResourceGroup' - Location = 'eastus' - Subnet = $vnet.Subnets[0] -} -$nicVM = New-AzNetworkInterface @nic - -## Create the configuration for the virtual machine. ## -$vm1 = @{ - VMName = 'myVM' - VMSize = 'Standard_DS1_v2' -} -$vm2 = @{ - ComputerName = 'myVM' - Credential = $cred -} -$vm3 = @{ - PublisherName = 'MicrosoftWindowsServer' - Offer = 'WindowsServer' - Skus = '2019-Datacenter' - Version = 'latest' -} -$vmConfig = -New-AzVMConfig @vm1 | Set-AzVMOperatingSystem -Windows @vm2 | Set-AzVMSourceImage @vm3 | Add-AzVMNetworkInterface -Id $nicVM.Id - -## Create the virtual machine. ## -New-AzVM -ResourceGroupName 'myResourceGroup' -Location 'eastus' -VM $vmConfig - -``` - -[!INCLUDE [ephemeral-ip-note.md](../../includes/ephemeral-ip-note.md)] - -## Test connectivity with the private endpoint - -Use the VM you created in the previous step to connect to the webapp across the private endpoint. - -1. Sign in to the [Azure portal](https://portal.azure.com). - -2. In the search box at the top of the portal, enter **Virtual machine**. Select **Virtual machines**. - -3. Select **myVM**. - -4. On the overview page for **myVM**, select **Connect**, and then select **Bastion**. - -5. Enter the username and password that you used when you created the VM. Select **Connect**. - -6. After you've connected, open PowerShell on the server. - -7. Enter `nslookup mywebapp1979.azurewebsites.net`. Replace **mywebapp1979** with the name of the web app that you created earlier. You'll receive a message that's similar to the following: - - ```powershell - Server: UnKnown - Address: 168.63.129.16 - - Non-authoritative answer: - Name: mywebapp1979.privatelink.azurewebsites.net - Address: 10.0.0.10 - Aliases: mywebapp1979.azurewebsites.net - ``` - - A static private IP address of *10.0.0.10* is returned for the web app name. - -8. In the bastion connection to **myVM**, open the web browser. - -9. Enter the URL of your web app, **https://mywebapp1979.azurewebsites.net**. - - If your web app hasn't been deployed, you'll get the following default web app page: - - :::image type="content" source="./media/private-endpoint-static-ip-powershell/web-app-default-page.png" alt-text="Screenshot of the default web app page on a browser." border="true"::: - -10. Close the connection to **myVM**. 
- -## Next steps - -To learn more about Private Link and Private endpoints, see - -- [What is Azure Private Link](private-link-overview.md) - -- [Private endpoint overview](private-endpoint-overview.md) - - - diff --git a/articles/private-link/toc.yml b/articles/private-link/toc.yml index 826cff3b477c..89969e99b2d1 100644 --- a/articles/private-link/toc.yml +++ b/articles/private-link/toc.yml @@ -65,10 +65,6 @@ href: /security/benchmark/azure/baselines/private-link-security-baseline?toc=/azure/private-link/toc.json - name: How-to items: - - name: Private endpoint with static IP address - items: - - name: PowerShell - href: private-endpoint-static-ip-powershell.md - name: Export private endpoint DNS records href: private-endpoint-export-dns.md - name: Manage network policies for private endpoints diff --git a/articles/purview/concept-workflow.md b/articles/purview/concept-workflow.md index c21f02e90893..4426e92ca7d6 100644 --- a/articles/purview/concept-workflow.md +++ b/articles/purview/concept-workflow.md @@ -12,7 +12,7 @@ ms.custom: template-concept # Workflows in Microsoft Purview -[!INCLUDE [Region Notice](./includes/workflow-regions.md)] +[!INCLUDE [feature-in-preview](includes/feature-in-preview.md)] Workflows are automated, repeatable business processes that users can create within Microsoft Purview to validate and orchestrate CUD (create, update, delete) operations on their data entities. Enabling these processes allow organizations to track changes, enforce policy compliance, and ensure quality data across their data landscape. diff --git a/articles/purview/frequently-asked-questions.yml b/articles/purview/frequently-asked-questions.yml index ddd79971bbf0..3f973e2cf58c 100644 --- a/articles/purview/frequently-asked-questions.yml +++ b/articles/purview/frequently-asked-questions.yml @@ -1,13 +1,13 @@ ### YamlMime:FAQ metadata: title: Frequently asked questions (FAQ) - description: This article answers frequently asked questions about Microsoft Purview. + description: This article answers frequently asked questions about Microsoft Purview (formerly Azure Purview). author: SunetraVirdi ms.author: suvirdi ms.service: purview ms.topic: faq ms.date: 05/08/2021 -title: Frequently asked questions (FAQ) about Microsoft Purview +title: Frequently asked questions (FAQ) about Microsoft Purview (formerly Azure Purview) summary: | sections: diff --git a/articles/purview/how-to-data-owner-policies-arc-sql-server.md b/articles/purview/how-to-data-owner-policies-arc-sql-server.md index 533122a1b187..edbd4f071d43 100644 --- a/articles/purview/how-to-data-owner-policies-arc-sql-server.md +++ b/articles/purview/how-to-data-owner-policies-arc-sql-server.md @@ -6,7 +6,7 @@ ms.author: vlrodrig ms.service: purview ms.subservice: purview-data-policies ms.topic: how-to -ms.date: 05/16/2022 +ms.date: 05/25/2022 ms.custom: references_regions, event-tier1-build-2022 --- # Provision access by data owner for SQL Server on Azure Arc-enabled servers (preview) @@ -22,9 +22,10 @@ This how-to guide describes how a data owner can delegate authoring policies in - SQL server version 2022 CTP 2.0 or later - Complete process to onboard that SQL server with Azure Arc and enable Azure AD Authentication. [Follow this guide to learn how](https://aka.ms/sql-on-arc-AADauth). 
-**Enforcement of policies is available only in the following regions for Microsoft Purview** +**Enforcement of policies for this data source is available only in the following regions for Microsoft Purview** - East US - UK South +- Australia East ## Security considerations - The Server admin can turn off the Microsoft Purview policy enforcement. @@ -35,8 +36,8 @@ This how-to guide describes how a data owner can delegate authoring policies in ## Configuration [!INCLUDE [Access policies generic configuration](./includes/access-policies-configuration-generic.md)] -> [!Warning] -> "Access control (IAM)" for the "SQL Server - Azure Arc" resource is visible in Azure portal only through a special link, described in the next section. You can assign the *IAM Owner* permission by entering Azure portal through that special link. You can alternatively configure this permission at the parent resource group level so that it gets inherited by this data source. +> [!Important] +> You can assign the data source side permission (i.e., *IAM Owner*) **only** by entering Azure portal through this [special link](https://portal.azure.com/?feature.canmodifystamps=true&Microsoft_Azure_HybridData_Platform=sqlrbacmain#blade/Microsoft_Azure_HybridCompute/AzureArcCenterBlade/sqlServers). Alternatively, you can configure this permission at the parent resource group level so that it gets inherited by the "SQL Server - Azure Arc" data source. ### SQL Server on Azure Arc-enabled server configuration This section describes the steps to configure the SQL Server on Azure Arc to use Microsoft Purview. @@ -52,9 +53,9 @@ This section describes the steps to configure the SQL Server on Azure Arc to use 1. Set **External Policy Based Authorization** to enabled -1. Enter **Microsoft Purview Endpoint** in the format *https://\.purview.azure.com*. Note: you can get the endpoint by selecting your Microsoft Purview account through [this link](https://portal.azure.com/#blade/HubsExtension/BrowseResource/resourceType/Microsoft.Purview%2FAccounts) and then navigating to the Properties section on the left menu. Scroll down until you see "Scan endpoint". Copy the listed endpoint but remove "/Scan" at the end. +1. Enter **Microsoft Purview Endpoint** in the format *https://\.purview.azure.com*. You can see the names of Microsoft Purview accounts in your tenant through [this link](https://portal.azure.com/#blade/HubsExtension/BrowseResource/resourceType/Microsoft.Purview%2FAccounts). Optionally, you can confirm the endpoint by navigating to the Microsoft Purview account, then to the Properties section on the left menu and scrolling down until you see "Scan endpoint". The full endpoint path will be the one listed without the "/Scan" at the end. -1. Make a note of the **App registration ID**, as you will need it when you register this data source for *Data use Management* in Microsoft Purview. +1. Make a note of the **App registration ID**, as you will need it when you register and enable this data source for *Data use Management* in Microsoft Purview. 1. Select the **Save** button to save the configuration. 
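To make the endpoint format concrete, the following PowerShell sketch derives the value to enter from a copied scan endpoint. The account name **contoso-purview** is a hypothetical placeholder; the only transformation is removing the trailing `/Scan`, as described in the step above.

```powershell
## Hypothetical scan endpoint copied from the account's Properties page.
$scanEndpoint = 'https://contoso-purview.purview.azure.com/Scan'

## The Microsoft Purview Endpoint is the same URL without the trailing '/Scan'.
$purviewEndpoint = $scanEndpoint -replace '/Scan$', ''
$purviewEndpoint   # https://contoso-purview.purview.azure.com
```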
@@ -165,7 +166,7 @@ This section contains a reference of how actions in Microsoft Purview data polic ## Next steps Check blog, demo and related how-to guides -* [Demo of access policy for Azure Storage](/video/media/8ce7c554-0d48-430f-8f63-edf94946947c/purview-policy-storage-dataowner-scenario_mid.mp4) +* [Demo of access policy for Azure Storage](https://learn-video.azurefd.net/vod/player?id=caa25ad3-7927-4dcc-88dd-6b74bcae98a2) * [Concepts for Microsoft Purview data owner policies](./concept-data-owner-policies.md) * Blog: [Private preview: controlling access to Azure SQL at scale with policies in Purview](https://techcommunity.microsoft.com/t5/azure-sql-blog/private-preview-controlling-access-to-azure-sql-at-scale-with/ba-p/2945491) * [Enable Microsoft Purview data owner policies on all data sources in a subscription or a resource group](./how-to-data-owner-policies-resource-group.md) diff --git a/articles/purview/how-to-data-owner-policies-azure-sql-db.md b/articles/purview/how-to-data-owner-policies-azure-sql-db.md index d404f8ae92e6..64d1c6e90e3d 100644 --- a/articles/purview/how-to-data-owner-policies-azure-sql-db.md +++ b/articles/purview/how-to-data-owner-policies-azure-sql-db.md @@ -149,7 +149,7 @@ This section contains a reference of how actions in Microsoft Purview data polic ## Next steps Check blog, demo and related how-to guides -* [Demo of access policy for Azure Storage](/video/media/8ce7c554-0d48-430f-8f63-edf94946947c/purview-policy-storage-dataowner-scenario_mid.mp4) +* [Demo of access policy for Azure Storage](https://learn-video.azurefd.net/vod/player?id=caa25ad3-7927-4dcc-88dd-6b74bcae98a2) * [Concepts for Microsoft Purview data owner policies](./concept-data-owner-policies.md) * Blog: [Private preview: controlling access to Azure SQL at scale with policies in Purview](https://techcommunity.microsoft.com/t5/azure-sql-blog/private-preview-controlling-access-to-azure-sql-at-scale-with/ba-p/2945491) * [Enable Microsoft Purview data owner policies on all data sources in a subscription or a resource group](./how-to-data-owner-policies-resource-group.md) diff --git a/articles/purview/how-to-data-owner-policies-resource-group.md b/articles/purview/how-to-data-owner-policies-resource-group.md index 94be678f1e3b..7f86db46f039 100644 --- a/articles/purview/how-to-data-owner-policies-resource-group.md +++ b/articles/purview/how-to-data-owner-policies-resource-group.md @@ -74,4 +74,4 @@ Check blog, demo and related tutorials: * [Concepts for Microsoft Purview data owner policies](./concept-data-owner-policies.md) * [Blog: resource group-level governance can significantly reduce effort](https://techcommunity.microsoft.com/t5/azure-purview-blog/data-policy-features-resource-group-level-governance-can/ba-p/3096314) -* [Video: Demo of data owner access policies for Azure Storage](https://www.youtube.com/watch?v=CFE8ltT19Ss) +* [Video: Demo of data owner access policies for Azure Storage](https://learn-video.azurefd.net/vod/player?id=caa25ad3-7927-4dcc-88dd-6b74bcae98a2) diff --git a/articles/purview/how-to-data-owner-policies-storage.md b/articles/purview/how-to-data-owner-policies-storage.md index 57eadb339761..4422b403ac33 100644 --- a/articles/purview/how-to-data-owner-policies-storage.md +++ b/articles/purview/how-to-data-owner-policies-storage.md @@ -94,7 +94,7 @@ This section contains a reference of how actions in Microsoft Purview data polic ## Next steps Check blog, demo and related tutorials: -* [Demo of access policy for Azure 
Storage](https://www.youtube.com/watch?v=CFE8ltT19Ss) +* [Demo of access policy for Azure Storage](https://learn-video.azurefd.net/vod/player?id=caa25ad3-7927-4dcc-88dd-6b74bcae98a2) * [Concepts for Microsoft Purview data owner policies](./concept-data-owner-policies.md) * [Enable Microsoft Purview data owner policies on all data sources in a subscription or a resource group](./how-to-data-owner-policies-resource-group.md) * [Blog: What's New in Microsoft Purview at Microsoft Ignite 2021](https://techcommunity.microsoft.com/t5/azure-purview/what-s-new-in-azure-purview-at-microsoft-ignite-2021/ba-p/2915954) diff --git a/articles/purview/how-to-request-access.md b/articles/purview/how-to-request-access.md index 726554d9d3a4..d89be9822179 100644 --- a/articles/purview/how-to-request-access.md +++ b/articles/purview/how-to-request-access.md @@ -12,7 +12,7 @@ ms.custom: template-how-to #Required; leave this attribute/value as-is. # How to request access for a data asset -[!INCLUDE [Region Notice](./includes/workflow-regions.md)] +[!INCLUDE [feature-in-preview](includes/feature-in-preview.md)] If you discover a data asset in the catalog that you would like to access, you can request access directly through Azure Purview. The request will trigger a workflow that will request that the owners of the data resource grant you access to that data source. diff --git a/articles/purview/how-to-workflow-business-terms-approval.md b/articles/purview/how-to-workflow-business-terms-approval.md index 22f2b6ceed66..2fa05fb42968 100644 --- a/articles/purview/how-to-workflow-business-terms-approval.md +++ b/articles/purview/how-to-workflow-business-terms-approval.md @@ -13,7 +13,7 @@ ms.custom: template-how-to #Required; leave this attribute/value as-is. # Approval workflow for business terms -[!INCLUDE [Region Notice](./includes/workflow-regions.md)] +[!INCLUDE [feature-in-preview](includes/feature-in-preview.md)] This guide will take you through the creation and management of approval workflows for business terms. diff --git a/articles/purview/how-to-workflow-manage-requests-approvals.md b/articles/purview/how-to-workflow-manage-requests-approvals.md index 5182fb68e6ad..082a23b03c8d 100644 --- a/articles/purview/how-to-workflow-manage-requests-approvals.md +++ b/articles/purview/how-to-workflow-manage-requests-approvals.md @@ -12,7 +12,7 @@ ms.custom: template-how-to #Required; leave this attribute/value as-is. # Manage workflow requests and approvals -[!INCLUDE [Region Notice](./includes/workflow-regions.md)] +[!INCLUDE [feature-in-preview](includes/feature-in-preview.md)] This article outlines how to manage requests and approvals that generated by a [workflow](concept-workflow.md) in Microsoft Purview. diff --git a/articles/purview/how-to-workflow-manage-runs.md b/articles/purview/how-to-workflow-manage-runs.md index 8e4d6fc8e422..4e780018e49f 100644 --- a/articles/purview/how-to-workflow-manage-runs.md +++ b/articles/purview/how-to-workflow-manage-runs.md @@ -12,7 +12,7 @@ ms.custom: template-how-to #Required; leave this attribute/value as-is. # Manage workflow runs -[!INCLUDE [Region Notice](./includes/workflow-regions.md)] +[!INCLUDE [feature-in-preview](includes/feature-in-preview.md)] This article outlines how to manage workflows that are already running. 
diff --git a/articles/purview/how-to-workflow-self-service-data-access-hybrid.md b/articles/purview/how-to-workflow-self-service-data-access-hybrid.md index a36a6ebda7aa..1accb0ccb5c4 100644 --- a/articles/purview/how-to-workflow-self-service-data-access-hybrid.md +++ b/articles/purview/how-to-workflow-self-service-data-access-hybrid.md @@ -12,7 +12,7 @@ ms.custom: template-how-to #Required; leave this attribute/value as-is. # Self-service access workflows for hybrid data estates -[!INCLUDE [Region Notice](./includes/workflow-regions.md)] +[!INCLUDE [feature-in-preview](includes/feature-in-preview.md)] [Workflows](concept-workflow.md) allow you to automate some business processes through Azure Purview. Self-service access workflows allow you to create a process for your users to request access to datasets they've discovered in Azure Purview! diff --git a/articles/purview/includes/access-policies-configuration-generic.md b/articles/purview/includes/access-policies-configuration-generic.md index 15e2ea0ff243..e91f30520463 100644 --- a/articles/purview/includes/access-policies-configuration-generic.md +++ b/articles/purview/includes/access-policies-configuration-generic.md @@ -4,7 +4,7 @@ ms.author: vlrodrig ms.service: purview ms.subservice: purview-data-policies ms.topic: include -ms.date: 05/23/2022 +ms.date: 05/24/2022 ms.custom: --- @@ -22,7 +22,7 @@ This section discusses the permissions needed to: - Author and publish policies in Microsoft Purview. >[!IMPORTANT] -> Currently, Microsoft Purview roles related to policy operations must be configured at **root collection level** and not child collection level. +> Currently, Microsoft Purview roles related to policy operations must be configured at **root collection level**. #### Permissions to make a data resource available for *Data Use Management* To enable the *Data Use Management* (DUM) toggle for a data source, resource group, or subscription, the same user needs to have both certain IAM privileges on the resource and certain Microsoft Purview privileges. @@ -48,9 +48,9 @@ The following permissions are needed in Microsoft Purview at the **root collecti Check the section on managing Microsoft Purview role assignments in this [guide](../how-to-create-and-manage-collections.md#add-roles-and-restrict-access-through-collections). ->[!WARNING] +>[!Note] > **Known issues** related to permissions -> - In addition to Microsoft Purview *Policy authors* role, user requires *Directory Reader* permission in Azure Active Directory to create data owner policy. Learn more about permissions for [Azure AD Directory Reader](../../active-directory/roles/permissions-reference.md#directory-readers) +> - In addition to the Microsoft Purview *Policy authors* role, a user may need the *Directory Reader* permission in Azure Active Directory to create a data owner policy. This is a common permission for users in an Azure tenant. You can check permissions for [Azure AD Directory Reader](../../active-directory/roles/permissions-reference.md#directory-readers) #### Delegation of access control responsibility to Microsoft Purview @@ -58,7 +58,9 @@ Check the section on managing Microsoft Purview role assignments in this [guide] > - IAM Owner role for a data source can be inherited from parent resource group, subscription or subscription Management Group.
> - Once a resource has been enabled for *Data Use Management*, **any** Microsoft Purview root-collection *policy author* will be able to create access policies against it, and **any** Microsoft Purview root-collection *Data source admin* will be able to publish those policies at **any point afterwards**. > - **Any** Microsoft Purview root *Collection admin* can assign **new** root-collection *Data Source Admin* and *Policy author* roles. +> - If the Microsoft Purview account is deleted, any published policies stop being enforced within an amount of time that depends on the specific data source. This can have implications for both security and data access availability. With these warnings in mind, here are some **suggested best practices for permissions:** - Minimize the number of people that hold Microsoft Purview root *Collection admin*, root *Data Source Admin* or root *Policy author* roles. - To ensure check and balances, assign the Microsoft Purview *Policy author* and *Data source admin* roles to different people in the organization. With this, before a data policy takes effect, a second person (the *Data source admin*) must review it and explicitly approve it by publishing it. +- A Microsoft Purview account can be deleted by anyone who holds the Contributor or Owner role in IAM. You can check these permissions by navigating to the Access control (IAM) section for your Microsoft Purview account and selecting **Role Assignments**. You can also place an [ARM lock](../../azure-resource-manager/management/lock-resources.md) to prevent the Microsoft Purview account from being deleted. diff --git a/articles/purview/includes/workflow-regions.md b/articles/purview/includes/workflow-regions.md deleted file mode 100644 index 4fc7c7e94318..000000000000 --- a/articles/purview/includes/workflow-regions.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -author: nayenama -ms.author: nayenama -ms.service: purview -ms.subservice: purview-data-catalog -ms.topic: include -ms.date: 3/09/2021 ---- - -> [!IMPORTANT] -> At this time, workflows are **not supported** in the following Azure regions: -> - Japan East -> - UAE North \ No newline at end of file diff --git a/articles/purview/register-scan-azure-sql-database.md b/articles/purview/register-scan-azure-sql-database.md index 30e8847ad774..69f26e97f74c 100644 --- a/articles/purview/register-scan-azure-sql-database.md +++ b/articles/purview/register-scan-azure-sql-database.md @@ -132,11 +132,11 @@ To scan your data source, you'll need to configure an authentication method in t The following options are supported: -* **System-assigned managed identity** (Recommended) - This is an identity associated directly with your Microsoft Purview account that allows you to authenticate directly with other Azure resources without needing to manage a go-between user or credential set. The **system-assigned** managed identity is created when your Microsoft Purview resource is created, is managed by Azure, and uses your Microsoft Purview account's name. The SAMI can't currently be used with a self-hosted integration runtime for Azure SQL. For more information, see the [managed identity overview](/azure/active-directory/managed-identities-azure-resources/overview). +* **System-assigned managed identity** (Recommended) - This is an identity associated directly with your Microsoft Purview account that allows you to authenticate directly with other Azure resources without needing to manage a go-between user or credential set.
The **system-assigned** managed identity is created when your Microsoft Purview resource is created, is managed by Azure, and uses your Microsoft Purview account's name. The SAMI can't currently be used with a self-hosted integration runtime for Azure SQL. For more information, see the [managed identity overview](../active-directory/managed-identities-azure-resources/overview.md). * **User-assigned managed identity** (preview) - Similar to a SAMI, a user-assigned managed identity (UAMI) is a credential resource that allows Microsoft Purview to authenticate against Azure Active Directory. The **user-assigned** managed by users in Azure, rather than by Azure itself, which gives you more control over security. The UAMI can't currently be used with a self-hosted integration runtime for Azure SQL. For more information, see our [guide for user-assigned managed identities.](manage-credentials.md#create-a-user-assigned-managed-identity) -* **Service Principal**- A service principal is an application that can be assigned permissions like any other group or user, without being associated directly with a person. Their authentication has an expiration date, and so can be useful for temporary projects. For more information, see the [service principal documentation](/azure/active-directory/develop/app-objects-and-service-principals). +* **Service Principal**- A service principal is an application that can be assigned permissions like any other group or user, without being associated directly with a person. Their authentication has an expiration date, and so can be useful for temporary projects. For more information, see the [service principal documentation](../active-directory/develop/app-objects-and-service-principals.md). * **SQL Authentication** - connect to the SQL database with a username and password. For more information about SQL Authentication, you can [follow the SQL authentication documentation](/sql/relational-databases/security/choose-an-authentication-mode#connecting-through-sql-server-authentication). If you need to create a login, follow this [guide to query an Azure SQL database](/azure/azure-sql/database/connect-query-portal), and use [this guide to create a login using T-SQL.](/sql/t-sql/statements/create-login-transact-sql) > [!NOTE] @@ -428,4 +428,4 @@ Now that you've registered your source, follow the below guides to learn more ab - [Data Estate Insights in Microsoft Purview](concept-insights.md) - [Lineage in Microsoft Purview](catalog-lineage-user-guide.md) -- [Search Data Catalog](how-to-search-catalog.md) +- [Search Data Catalog](how-to-search-catalog.md) \ No newline at end of file diff --git a/articles/purview/tutorial-azure-purview-checklist.md b/articles/purview/tutorial-azure-purview-checklist.md index bd78208706ca..edb0c980beaf 100644 --- a/articles/purview/tutorial-azure-purview-checklist.md +++ b/articles/purview/tutorial-azure-purview-checklist.md @@ -19,7 +19,7 @@ This article lists prerequisites that help you get started quickly on Microsoft |1 | Azure Active Directory Tenant |N/A |An [Azure Active Directory tenant](../active-directory/fundamentals/active-directory-access-create-new-tenant.md) should be associated with your subscription.
    • *Global Administrator* or *Information Protection Administrator* role is required, if you plan to [extend Microsoft 365 Sensitivity Labels to Microsoft Purview for files and db columns](create-sensitivity-label.md)
    • *Global Administrator* or *Power BI Administrator* role is required, if you're planning to [scan Power BI tenants](register-scan-power-bi-tenant.md).
    | |2 |An active Azure Subscription |*Subscription Owner* |An Azure subscription is needed to deploy Microsoft Purview and its managed resources. If you don't have an Azure subscription, create a [free subscription](https://azure.microsoft.com/free/) before you begin. | |3 |Define whether you plan to deploy a Microsoft Purview with a managed event hub | N/A |A managed event hub is created as part of Microsoft Purview account creation, see Microsoft Purview account creation. You can publish messages to the event hub kafka topic ATLAS_HOOK and Microsoft Purview will consume and process it. Microsoft Purview will notify entity changes to the event hub kafka topic ATLAS_ENTITIES and user can consume and process it. | -|4 |Register the following resource providers:
    • Microsoft.Storage
    • Microsoft.EventHub (optional)
    • Microsoft.Purview
    |*Subscription Owner* or custom role to register Azure resource providers (_/register/action_) | [Register required Azure Resource Providers](/azure/azure-resource-manager/management/resource-providers-and-types) in the Azure Subscription that is designated for Microsoft Purview Account. Review [Azure resource provider operations](../role-based-access-control/resource-provider-operations.md). | +|4 |Register the following resource providers:
    • Microsoft.Storage
    • Microsoft.EventHub (optional)
    • Microsoft.Purview
    |*Subscription Owner* or custom role to register Azure resource providers (_/register/action_) | [Register required Azure Resource Providers](../azure-resource-manager/management/resource-providers-and-types.md) in the Azure Subscription that is designated for Microsoft Purview Account. Review [Azure resource provider operations](../role-based-access-control/resource-provider-operations.md). | |5 |Update Azure Policy to allow deployment of the following resources in your Azure subscription:
    • Microsoft Purview
    • Azure Storage
    • Azure Event Hubs (optional)
    |*Subscription Owner* |Use this step if an existing Azure Policy prevents deploying such Azure resources. If a blocking policy exists and needs to remain in place, please follow our [Microsoft Purview exception tag guide](create-azure-purview-portal-faq.md) and follow the steps to create an exception for Microsoft Purview accounts. | |6 | Define your network security requirements. | Network and Security architects. |
    • Review [Microsoft Purview network architecture and best practices](concept-best-practices-network.md) to define what scenario is more relevant to your network requirements.
    • If private network is needed, use [Microsoft Purview Managed IR](catalog-managed-vnet.md) to scan Azure data sources when possible to reduce complexity and administrative overhead.
    | |7 |An Azure Virtual Network and Subnet(s) for Microsoft Purview private endpoints. | *Network Contributor* to create or update Azure VNet. |Use this step if you're planning to deploy [private endpoint connectivity with Microsoft Purview](catalog-private-link.md):
    • Private endpoints for **Ingestion**.
    • Private endpoint for Microsoft Purview **Account**.
    • Private endpoint for Microsoft Purview **Portal**.

    Deploy [Azure Virtual Network](../virtual-network/quick-create-portal.md) if you need one. | @@ -54,4 +54,4 @@ This article lists prerequisites that help you get started quickly on Microsoft |35 |Grant access to data roles in the organization |*Collection admin* |Provide access to other teams to use Microsoft Purview:
    • Data curator
    • Data reader
    • Collection admin
    • Data source admin
    • Policy Author
    • Workflow admin

    For more information, see [Access control in Microsoft Purview](catalog-permissions.md). | ## Next steps -- [Review Microsoft Purview deployment best practices](./deployment-best-practices.md) +- [Review Microsoft Purview deployment best practices](./deployment-best-practices.md) \ No newline at end of file diff --git a/articles/purview/tutorial-data-owner-policies-storage.md b/articles/purview/tutorial-data-owner-policies-storage.md index 14d3b7fd4ab4..2ea318cc6660 100644 --- a/articles/purview/tutorial-data-owner-policies-storage.md +++ b/articles/purview/tutorial-data-owner-policies-storage.md @@ -186,6 +186,6 @@ To delete a policy in Microsoft Purview, follow these steps: Check our demo and related tutorials: > [!div class="nextstepaction"] -> [Demo of access policy for Azure Storage](https://docs.microsoft.com/video/media/8ce7c554-0d48-430f-8f63-edf94946947c/purview-policy-storage-dataowner-scenario_mid.mp4) +> [Demo of access policy for Azure Storage](https://learn-video.azurefd.net/vod/player?id=caa25ad3-7927-4dcc-88dd-6b74bcae98a2) > [Concepts for Microsoft Purview data owner policies](./concept-data-owner-policies.md) -> [Enable Microsoft Purview data owner policies on all data sources in a subscription or a resource group](./how-to-data-owner-policies-resource-group.md) +> [Enable Microsoft Purview data owner policies on all data sources in a subscription or a resource group](./how-to-data-owner-policies-resource-group.md) \ No newline at end of file diff --git a/articles/role-based-access-control/resource-provider-operations.md b/articles/role-based-access-control/resource-provider-operations.md index f647dd0a9eb9..a75085e4f113 100644 --- a/articles/role-based-access-control/resource-provider-operations.md +++ b/articles/role-based-access-control/resource-provider-operations.md @@ -5080,7 +5080,7 @@ Azure service: [Azure SQL Database](/azure/azure-sql/database/index), [Azure SQL > | --- | --- | > | Microsoft.Sql/checkNameAvailability/action | Verify whether given server name is available for provisioning worldwide for a given subscription. | > | Microsoft.Sql/register/action | Registers the subscription for the Microsoft SQL Database resource provider and enables the creation of Microsoft SQL Databases. | -> | Microsoft.Sql/unregister/action | UnRegisters the subscription for the Microsoft SQL Database resource provider and enables the creation of Microsoft SQL Databases. | +> | Microsoft.Sql/unregister/action | UnRegisters the subscription for the Microsoft SQL Database resource provider and disables the creation of Microsoft SQL Databases. | > | Microsoft.Sql/privateEndpointConnectionsApproval/action | Determines if user is allowed to approve a private endpoint connection | > | Microsoft.Sql/instancePools/read | Gets an instance pool | > | Microsoft.Sql/instancePools/write | Creates or updates an instance pool | diff --git a/articles/role-based-access-control/transfer-subscription.md b/articles/role-based-access-control/transfer-subscription.md index 4d85bfb9fd09..cfba3398a4f6 100644 --- a/articles/role-based-access-control/transfer-subscription.md +++ b/articles/role-based-access-control/transfer-subscription.md @@ -234,7 +234,7 @@ When you create a key vault, it is automatically tied to the default Azure Activ - Use [az sql server ad-admin list](/cli/azure/sql/server/ad-admin#az-sql-server-ad-admin-list) and the [az graph](/cli/azure/graph) extension to see if you are using Azure SQL databases with Azure AD authentication integration enabled. 
For more information, see [Configure and manage Azure Active Directory authentication with SQL](/azure/azure-sql/database/authentication-aad-configure). ```azurecli - az sql server ad-admin list --ids $(az graph query -q 'resources | where type == "microsoft.sql/servers" | project id' -o tsv | cut -f1) + az sql server ad-admin list --ids $(az graph query -q "resources | where type == 'microsoft.sql/servers' | project id" -o tsv | cut -f1) ``` ### List ACLs diff --git a/articles/route-server/vmware-solution-default-route.md b/articles/route-server/vmware-solution-default-route.md index ee30cf43e1ec..c132b3bc2c1e 100644 --- a/articles/route-server/vmware-solution-default-route.md +++ b/articles/route-server/vmware-solution-default-route.md @@ -13,7 +13,7 @@ ms.author: halkazwini [Azure VMware Solution](../azure-vmware/introduction.md) is an Azure service where native VMware vSphere workloads run and communicate with other Azure services. This communication happens over ExpressRoute, and Azure Route Server can be used to modify the default behavior of Azure VMware Solution networking. For example, a default route can be injected from a Network Virtual Appliance (NVA) in Azure to attract traffic from AVS and inspect it before sending it out to the public Internet, or to analyze traffic between AVS and the on-premises network. -Additionally, similar designs can be used to interconnect AVS and on-premises networks sending traffic through an NVA, either because traffic inspection is not required or because ExpressRoute Global Reach is not available in the relevant regions. +Additionally, similar designs can be used to interconnect AVS and on-premises networks sending traffic through an NVA, either because traffic inspection isn't required or because ExpressRoute Global Reach isn't available in the relevant regions. ## Topology @@ -33,17 +33,25 @@ There are two main scenarios for this pattern: - ExpressRoute Global Reach might not be available on a particular region to interconnect the ExpressRoute circuits of AVS and the on-premises network. - Some organizations might have the requirement to send traffic between AVS and the on-premises network through an NVA (typically a firewall). -If both ExpressRoute circuits (to AVS and to on-premises) are terminated in the same ExpressRoute gateway, you could think that the gateway is going to route packets across them. However, an ExpressRoute gateway is not designed to do that. Instead, you need to hairpin the traffic to a Network Virtual Appliance that is able to route the traffic. To that purpose, the NVA should advertise a superset of the AVS and on-premises prefixes, as the following diagram shows: +> [!IMPORTANT] +> Global Reach is still the preferred option to connect AVS and on-premises environments; the patterns described in this document add considerable complexity to the environment. + +If both ExpressRoute circuits (to AVS and to on-premises) are terminated in the same ExpressRoute gateway, you could assume that the gateway is going to route packets across them. However, an ExpressRoute gateway isn't designed to do that. Instead, you need to hairpin the traffic to a Network Virtual Appliance that is able to route the traffic. For that purpose, two actions are required: + +- The NVA should advertise a supernet for the AVS and on-premises prefixes, as the diagram below shows.
You could use a supernet that includes both AVS and on-premises prefixes, or individual prefixes for AVS and on-premises (always less specific than the actual prefixes advertised over ExpressRoute). Consider, though, that all supernet prefixes advertised to Route Server are going to be propagated both to AVS and on-premises. +- UDRs in the GatewaySubnet that exactly match the prefixes advertised from AVS and on-premises will hairpin traffic from the GatewaySubnet to the Network Virtual Appliance (a sketch of these routes appears later in this section). :::image type="content" source="./media/scenarios/vmware-solution-to-on-premises-hairpin.png" alt-text="Diagram of AVS to on-premises communication with Route Server in a single region."::: -As the diagram shows, the NVA needs to advertise a more generic (less specific) prefix that both the on-premises network and AVS. You need to be careful with this approach, since the NVA might be potentially attracting traffic that it should not (since it is advertising wider ranges, in the example above the whole `10.0.0.0/8` network). +As the diagram above shows, the NVA needs to advertise more generic (less specific) prefixes that include the networks from on-premises and AVS. You need to be careful with this approach, since the NVA might attract traffic that it shouldn't (because it is advertising wider ranges; in the example above, the whole `10.0.0.0/8` network). -If two regions are involved, you would need an NVA in each region, and both NVAs would exchange the routes they learn from their respective Azure Route Servers via BGP and some sort of encapsulation protocol such as VXLAN or IPsec, as the following diagram shows. +If advertising less specific prefixes isn't possible, or if the UDRs required in the GatewaySubnet aren't desired or supported (for example, because they exceed the maximum number of routes per route table; see [Azure subscription and service limits](../azure-resource-manager/management/azure-subscription-service-limits.md#networking-limits) for details), you could instead implement an alternative design that uses two separate VNets. In this topology, instead of propagating less specific routes to attract traffic to the ExpressRoute gateway, two different NVAs in separate VNets exchange routes with each other, and propagate them to their respective ExpressRoute circuits via BGP and Azure Route Server, as the following diagram shows. Each NVA has full control over which prefixes are propagated to each ExpressRoute circuit. For example, the diagram below shows how a single 0.0.0.0/0 route is advertised to AVS, and the individual AVS prefixes are propagated to the on-premises network: :::image type="content" source="./media/scenarios/vmware-solution-to-on-premises.png" alt-text="Diagram of AVS to on-premises communication with Route Server in two regions."::: -The reason why encapsulation is needed is because the NVA NICs would learn the routes from ExpressRoute or from the Route Server, so they would send packets that need to be routed to the other NVA in the wrong direction (potentially creating a routing loop returning the packets to the local NVA). +Note that some sort of encapsulation protocol, such as VXLAN or IPsec, is required between the NVAs. Encapsulation is needed because the NVA NICs would otherwise learn the routes from Azure Route Server with the NVA itself as next hop and create a routing loop.
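For the single-VNet option above, the GatewaySubnet UDRs can be sketched with Azure PowerShell as follows. This is a minimal sketch, not a configuration from this article: the prefixes (`10.100.0.0/22` for AVS, `10.200.0.0/16` for on-premises), the NVA address (`10.1.1.4`), the GatewaySubnet prefix, and the resource names are placeholders that you would replace with the values in your environment.

```azurepowershell
## Create a route table for the GatewaySubnet.
$rt = New-AzRouteTable -Name 'rt-gatewaysubnet' -ResourceGroupName 'myResourceGroup' -Location 'eastus'

## One route per prefix advertised from AVS and from on-premises, with the NVA as next hop.
Add-AzRouteConfig -RouteTable $rt -Name 'to-avs' -AddressPrefix '10.100.0.0/22' -NextHopType VirtualAppliance -NextHopIpAddress '10.1.1.4' | Out-Null
Add-AzRouteConfig -RouteTable $rt -Name 'to-onprem' -AddressPrefix '10.200.0.0/16' -NextHopType VirtualAppliance -NextHopIpAddress '10.1.1.4' | Out-Null
Set-AzRouteTable -RouteTable $rt | Out-Null

## Associate the route table with the GatewaySubnet of the hub virtual network.
## The address prefix must match the existing GatewaySubnet prefix.
$vnet = Get-AzVirtualNetwork -Name 'myVNet' -ResourceGroupName 'myResourceGroup'
Set-AzVirtualNetworkSubnetConfig -VirtualNetwork $vnet -Name 'GatewaySubnet' -AddressPrefix '10.1.0.0/27' -RouteTable $rt | Out-Null
$vnet | Set-AzVirtualNetwork
```

The requirement that the UDRs exactly match the advertised prefixes matters because Azure selects routes by longest prefix match first; a UDR only takes precedence over a BGP-learned route when the prefixes are identical.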
+ +The main difference between this dual-VNet design and the previously described single-VNet design is that with two VNets you have full control over what is advertised to each ExpressRoute circuit, which allows for a more dynamic and granular configuration. In comparison, in the single-VNet design described earlier in this document, a common set of supernets or less specific prefixes is sent down both circuits to attract traffic to the VNet. Additionally, the single-VNet design has a static configuration component in the UDRs that are required in the Gateway Subnet. Hence, although less cost-effective (two ExpressRoute gateways and two sets of NVAs are required), the dual-VNet design might be a better alternative for very dynamic routing environments. ## Next steps diff --git a/articles/search/TOC.yml b/articles/search/TOC.yml index 664430aa482a..36f70e17ea70 100644 --- a/articles/search/TOC.yml +++ b/articles/search/TOC.yml @@ -364,7 +364,7 @@ href: search-howto-connecting-azure-sql-mi-to-azure-search-using-indexers.md - name: Azure SQL Server VMs href: search-howto-connecting-azure-sql-iaas-to-azure-search-using-indexers.md - - name: Power Query data sources + - name: Power Query (preview - retired) href: search-how-to-index-power-query-data-sources.md - name: SharePoint in Microsoft 365 href: search-howto-index-sharepoint-online.md diff --git a/articles/search/cognitive-search-skill-entity-linking-v3.md b/articles/search/cognitive-search-skill-entity-linking-v3.md index c5a4aa8111a5..8cca97d1f214 100644 --- a/articles/search/cognitive-search-skill-entity-linking-v3.md +++ b/articles/search/cognitive-search-skill-entity-linking-v3.md @@ -4,8 +4,8 @@ titleSuffix: Azure Cognitive Search description: Extract different linked entities from text in an enrichment pipeline in Azure Cognitive Search. manager: jennmar -author: ayokande -ms.author: aakande +author: nitinme +ms.author: nitinme ms.service: cognitive-search ms.topic: reference ms.date: 12/09/2021 diff --git a/articles/search/cognitive-search-skill-entity-recognition-v3.md b/articles/search/cognitive-search-skill-entity-recognition-v3.md index 22ce36306a1e..f526361f1806 100644 --- a/articles/search/cognitive-search-skill-entity-recognition-v3.md +++ b/articles/search/cognitive-search-skill-entity-recognition-v3.md @@ -4,8 +4,8 @@ titleSuffix: Azure Cognitive Search description: Extract different types of entities using the machine learning models of Azure Cognitive Services for Language in an AI enrichment pipeline in Azure Cognitive Search.
manager: jennmar -author: ayokande -ms.author: aakande +author: nitinme +ms.author: nitinme ms.service: cognitive-search ms.topic: reference ms.date: 12/09/2021 diff --git a/articles/search/media/search-power-query-connectors/add-pipeline-trigger-postgresql.png b/articles/search/media/search-power-query-connectors/add-pipeline-trigger-postgresql.png new file mode 100644 index 000000000000..d8ccc089f098 Binary files /dev/null and b/articles/search/media/search-power-query-connectors/add-pipeline-trigger-postgresql.png differ diff --git a/articles/search/media/search-power-query-connectors/add-pipeline-trigger.png b/articles/search/media/search-power-query-connectors/add-pipeline-trigger.png new file mode 100644 index 000000000000..a17a34acae2c Binary files /dev/null and b/articles/search/media/search-power-query-connectors/add-pipeline-trigger.png differ diff --git a/articles/search/media/search-power-query-connectors/author-datasets.png b/articles/search/media/search-power-query-connectors/author-datasets.png new file mode 100644 index 000000000000..4a66e01813db Binary files /dev/null and b/articles/search/media/search-power-query-connectors/author-datasets.png differ diff --git a/articles/search/media/search-power-query-connectors/author-pipelines.png b/articles/search/media/search-power-query-connectors/author-pipelines.png new file mode 100644 index 000000000000..393b829dbb8b Binary files /dev/null and b/articles/search/media/search-power-query-connectors/author-pipelines.png differ diff --git a/articles/search/media/search-power-query-connectors/azure-data-factory-manage-icon.png b/articles/search/media/search-power-query-connectors/azure-data-factory-manage-icon.png new file mode 100644 index 000000000000..abaef0d2ddcd Binary files /dev/null and b/articles/search/media/search-power-query-connectors/azure-data-factory-manage-icon.png differ diff --git a/articles/search/media/search-power-query-connectors/azure-search-dataset-postgresql-save.png b/articles/search/media/search-power-query-connectors/azure-search-dataset-postgresql-save.png new file mode 100644 index 000000000000..c3c06f0802ad Binary files /dev/null and b/articles/search/media/search-power-query-connectors/azure-search-dataset-postgresql-save.png differ diff --git a/articles/search/media/search-power-query-connectors/choose-trigger-new.png b/articles/search/media/search-power-query-connectors/choose-trigger-new.png new file mode 100644 index 000000000000..18ed003acd2a Binary files /dev/null and b/articles/search/media/search-power-query-connectors/choose-trigger-new.png differ diff --git a/articles/search/media/search-power-query-connectors/delimited-text-save-postgresql.png b/articles/search/media/search-power-query-connectors/delimited-text-save-postgresql.png new file mode 100644 index 000000000000..b3ff3f7b1cbe Binary files /dev/null and b/articles/search/media/search-power-query-connectors/delimited-text-save-postgresql.png differ diff --git a/articles/search/media/search-power-query-connectors/delimited-text-sink.png b/articles/search/media/search-power-query-connectors/delimited-text-sink.png new file mode 100644 index 000000000000..cfee0671c791 Binary files /dev/null and b/articles/search/media/search-power-query-connectors/delimited-text-sink.png differ diff --git a/articles/search/media/search-power-query-connectors/delimited-text-snowflake-save.png b/articles/search/media/search-power-query-connectors/delimited-text-snowflake-save.png new file mode 100644 index 000000000000..543198de5ac6 Binary files /dev/null 
and b/articles/search/media/search-power-query-connectors/delimited-text-snowflake-save.png differ diff --git a/articles/search/media/search-power-query-connectors/drag-and-drop-snowflake-copy-data.png b/articles/search/media/search-power-query-connectors/drag-and-drop-snowflake-copy-data.png new file mode 100644 index 000000000000..908128a05792 Binary files /dev/null and b/articles/search/media/search-power-query-connectors/drag-and-drop-snowflake-copy-data.png differ diff --git a/articles/search/media/search-power-query-connectors/index-from-storage-activity-postgresql.png b/articles/search/media/search-power-query-connectors/index-from-storage-activity-postgresql.png new file mode 100644 index 000000000000..81db07952799 Binary files /dev/null and b/articles/search/media/search-power-query-connectors/index-from-storage-activity-postgresql.png differ diff --git a/articles/search/media/search-power-query-connectors/index-from-storage-activity.png b/articles/search/media/search-power-query-connectors/index-from-storage-activity.png new file mode 100644 index 000000000000..70453d4c0b25 Binary files /dev/null and b/articles/search/media/search-power-query-connectors/index-from-storage-activity.png differ diff --git a/articles/search/media/search-power-query-connectors/linked-service-search-new.png b/articles/search/media/search-power-query-connectors/linked-service-search-new.png new file mode 100644 index 000000000000..04cd7146101e Binary files /dev/null and b/articles/search/media/search-power-query-connectors/linked-service-search-new.png differ diff --git a/articles/search/media/search-power-query-connectors/new-dataset-blob-storage.png b/articles/search/media/search-power-query-connectors/new-dataset-blob-storage.png new file mode 100644 index 000000000000..de21dbc28533 Binary files /dev/null and b/articles/search/media/search-power-query-connectors/new-dataset-blob-storage.png differ diff --git a/articles/search/media/search-power-query-connectors/new-dataset-postgresql.png b/articles/search/media/search-power-query-connectors/new-dataset-postgresql.png new file mode 100644 index 000000000000..8b1fa86f4b87 Binary files /dev/null and b/articles/search/media/search-power-query-connectors/new-dataset-postgresql.png differ diff --git a/articles/search/media/search-power-query-connectors/new-dataset-search.png b/articles/search/media/search-power-query-connectors/new-dataset-search.png new file mode 100644 index 000000000000..af460dfeba0b Binary files /dev/null and b/articles/search/media/search-power-query-connectors/new-dataset-search.png differ diff --git a/articles/search/media/search-power-query-connectors/new-dataset-snowflake.png b/articles/search/media/search-power-query-connectors/new-dataset-snowflake.png new file mode 100644 index 000000000000..6c07666503d2 Binary files /dev/null and b/articles/search/media/search-power-query-connectors/new-dataset-snowflake.png differ diff --git a/articles/search/media/search-power-query-connectors/new-dataset.png b/articles/search/media/search-power-query-connectors/new-dataset.png new file mode 100644 index 000000000000..1105b5fc2808 Binary files /dev/null and b/articles/search/media/search-power-query-connectors/new-dataset.png differ diff --git a/articles/search/media/search-power-query-connectors/new-linked-service-blob.png b/articles/search/media/search-power-query-connectors/new-linked-service-blob.png new file mode 100644 index 000000000000..33933250849e Binary files /dev/null and 
b/articles/search/media/search-power-query-connectors/new-linked-service-blob.png differ diff --git a/articles/search/media/search-power-query-connectors/new-linked-service-postgresql.png b/articles/search/media/search-power-query-connectors/new-linked-service-postgresql.png new file mode 100644 index 000000000000..3c7aadbb887f Binary files /dev/null and b/articles/search/media/search-power-query-connectors/new-linked-service-postgresql.png differ diff --git a/articles/search/media/search-power-query-connectors/new-linked-service-search.png b/articles/search/media/search-power-query-connectors/new-linked-service-search.png new file mode 100644 index 000000000000..9d1b31a3a122 Binary files /dev/null and b/articles/search/media/search-power-query-connectors/new-linked-service-search.png differ diff --git a/articles/search/media/search-power-query-connectors/new-linked-service-snowflake-form.png b/articles/search/media/search-power-query-connectors/new-linked-service-snowflake-form.png new file mode 100644 index 000000000000..1fd7d62299d8 Binary files /dev/null and b/articles/search/media/search-power-query-connectors/new-linked-service-snowflake-form.png differ diff --git a/articles/search/media/search-power-query-connectors/new-linked-service.png b/articles/search/media/search-power-query-connectors/new-linked-service.png new file mode 100644 index 000000000000..da3427ba1963 Binary files /dev/null and b/articles/search/media/search-power-query-connectors/new-linked-service.png differ diff --git a/articles/search/media/search-power-query-connectors/new-pipeline.png b/articles/search/media/search-power-query-connectors/new-pipeline.png new file mode 100644 index 000000000000..632208adfcc9 Binary files /dev/null and b/articles/search/media/search-power-query-connectors/new-pipeline.png differ diff --git a/articles/search/media/search-power-query-connectors/new-trigger.png b/articles/search/media/search-power-query-connectors/new-trigger.png new file mode 100644 index 000000000000..019b9f367089 Binary files /dev/null and b/articles/search/media/search-power-query-connectors/new-trigger.png differ diff --git a/articles/search/media/search-power-query-connectors/pipeline-link-acitivities-postgresql.png b/articles/search/media/search-power-query-connectors/pipeline-link-acitivities-postgresql.png new file mode 100644 index 000000000000..742a444d6a22 Binary files /dev/null and b/articles/search/media/search-power-query-connectors/pipeline-link-acitivities-postgresql.png differ diff --git a/articles/search/media/search-power-query-connectors/pipeline-link-activities-snowflake-storage-index.png b/articles/search/media/search-power-query-connectors/pipeline-link-activities-snowflake-storage-index.png new file mode 100644 index 000000000000..e17729b5d2b3 Binary files /dev/null and b/articles/search/media/search-power-query-connectors/pipeline-link-activities-snowflake-storage-index.png differ diff --git a/articles/search/media/search-power-query-connectors/postgresql-index.png b/articles/search/media/search-power-query-connectors/postgresql-index.png new file mode 100644 index 000000000000..b8b6e64811a4 Binary files /dev/null and b/articles/search/media/search-power-query-connectors/postgresql-index.png differ diff --git a/articles/search/media/search-power-query-connectors/postgresql-pipeline-general.png b/articles/search/media/search-power-query-connectors/postgresql-pipeline-general.png new file mode 100644 index 000000000000..799511a97268 Binary files /dev/null and 
b/articles/search/media/search-power-query-connectors/postgresql-pipeline-general.png differ diff --git a/articles/search/media/search-power-query-connectors/postgresql-set-properties.png b/articles/search/media/search-power-query-connectors/postgresql-set-properties.png new file mode 100644 index 000000000000..43c26d0bc847 Binary files /dev/null and b/articles/search/media/search-power-query-connectors/postgresql-set-properties.png differ diff --git a/articles/search/media/search-power-query-connectors/publish-pipeline-postgresql.png b/articles/search/media/search-power-query-connectors/publish-pipeline-postgresql.png new file mode 100644 index 000000000000..dbaa5012a40d Binary files /dev/null and b/articles/search/media/search-power-query-connectors/publish-pipeline-postgresql.png differ diff --git a/articles/search/media/search-power-query-connectors/publish-pipeline.png b/articles/search/media/search-power-query-connectors/publish-pipeline.png new file mode 100644 index 000000000000..2a98dc1745d4 Binary files /dev/null and b/articles/search/media/search-power-query-connectors/publish-pipeline.png differ diff --git a/articles/search/media/search-power-query-connectors/sas-url-storage-linked-service-postgresql.png b/articles/search/media/search-power-query-connectors/sas-url-storage-linked-service-postgresql.png new file mode 100644 index 000000000000..14ab85a0e514 Binary files /dev/null and b/articles/search/media/search-power-query-connectors/sas-url-storage-linked-service-postgresql.png differ diff --git a/articles/search/media/search-power-query-connectors/sas-url-storage-linked-service-snowflake.png b/articles/search/media/search-power-query-connectors/sas-url-storage-linked-service-snowflake.png new file mode 100644 index 000000000000..760ed2cb7b31 Binary files /dev/null and b/articles/search/media/search-power-query-connectors/sas-url-storage-linked-service-snowflake.png differ diff --git a/articles/search/media/search-power-query-connectors/search-postgresql-data-store.png b/articles/search/media/search-power-query-connectors/search-postgresql-data-store.png new file mode 100644 index 000000000000..6b6c68ef28ab Binary files /dev/null and b/articles/search/media/search-power-query-connectors/search-postgresql-data-store.png differ diff --git a/articles/search/media/search-power-query-connectors/search-sink.png b/articles/search/media/search-power-query-connectors/search-sink.png new file mode 100644 index 000000000000..541e81f5f6be Binary files /dev/null and b/articles/search/media/search-power-query-connectors/search-sink.png differ diff --git a/articles/search/media/search-power-query-connectors/set-delimited-text-properties.png b/articles/search/media/search-power-query-connectors/set-delimited-text-properties.png new file mode 100644 index 000000000000..aa97e94f5106 Binary files /dev/null and b/articles/search/media/search-power-query-connectors/set-delimited-text-properties.png differ diff --git a/articles/search/media/search-power-query-connectors/set-search-postgresql-properties.png b/articles/search/media/search-power-query-connectors/set-search-postgresql-properties.png new file mode 100644 index 000000000000..e3f44745e11b Binary files /dev/null and b/articles/search/media/search-power-query-connectors/set-search-postgresql-properties.png differ diff --git a/articles/search/media/search-power-query-connectors/set-search-snowflake-properties.png b/articles/search/media/search-power-query-connectors/set-search-snowflake-properties.png new file mode 100644 index 
000000000000..c9b54781bc86 Binary files /dev/null and b/articles/search/media/search-power-query-connectors/set-search-snowflake-properties.png differ diff --git a/articles/search/media/search-power-query-connectors/set-snowflake-properties.png b/articles/search/media/search-power-query-connectors/set-snowflake-properties.png new file mode 100644 index 000000000000..4697166a71f4 Binary files /dev/null and b/articles/search/media/search-power-query-connectors/set-snowflake-properties.png differ diff --git a/articles/search/media/search-power-query-connectors/sink-search-index-postgresql.png b/articles/search/media/search-power-query-connectors/sink-search-index-postgresql.png new file mode 100644 index 000000000000..1553c3284381 Binary files /dev/null and b/articles/search/media/search-power-query-connectors/sink-search-index-postgresql.png differ diff --git a/articles/search/media/search-power-query-connectors/sink-storage-postgresql.png b/articles/search/media/search-power-query-connectors/sink-storage-postgresql.png new file mode 100644 index 000000000000..a9141316df32 Binary files /dev/null and b/articles/search/media/search-power-query-connectors/sink-storage-postgresql.png differ diff --git a/articles/search/media/search-power-query-connectors/snowflake-icon.png b/articles/search/media/search-power-query-connectors/snowflake-icon.png new file mode 100644 index 000000000000..1b03cc73b2d7 Binary files /dev/null and b/articles/search/media/search-power-query-connectors/snowflake-icon.png differ diff --git a/articles/search/media/search-power-query-connectors/snowflake-index.png b/articles/search/media/search-power-query-connectors/snowflake-index.png new file mode 100644 index 000000000000..f8714dff3761 Binary files /dev/null and b/articles/search/media/search-power-query-connectors/snowflake-index.png differ diff --git a/articles/search/media/search-power-query-connectors/source-postgresql.png b/articles/search/media/search-power-query-connectors/source-postgresql.png new file mode 100644 index 000000000000..17a13172c5f8 Binary files /dev/null and b/articles/search/media/search-power-query-connectors/source-postgresql.png differ diff --git a/articles/search/media/search-power-query-connectors/source-snowflake.png b/articles/search/media/search-power-query-connectors/source-snowflake.png new file mode 100644 index 000000000000..e4175b1ffd77 Binary files /dev/null and b/articles/search/media/search-power-query-connectors/source-snowflake.png differ diff --git a/articles/search/media/search-power-query-connectors/source-storage-postgresql.png b/articles/search/media/search-power-query-connectors/source-storage-postgresql.png new file mode 100644 index 000000000000..ffd4d412d43a Binary files /dev/null and b/articles/search/media/search-power-query-connectors/source-storage-postgresql.png differ diff --git a/articles/search/media/search-power-query-connectors/source-storage.png b/articles/search/media/search-power-query-connectors/source-storage.png new file mode 100644 index 000000000000..0d2ded458f51 Binary files /dev/null and b/articles/search/media/search-power-query-connectors/source-storage.png differ diff --git a/articles/search/media/search-power-query-connectors/storage-set-properties-snowflake.png b/articles/search/media/search-power-query-connectors/storage-set-properties-snowflake.png new file mode 100644 index 000000000000..9c9dedcc7414 Binary files /dev/null and b/articles/search/media/search-power-query-connectors/storage-set-properties-snowflake.png differ diff --git 
a/articles/search/media/search-power-query-connectors/trigger-postgresql.png b/articles/search/media/search-power-query-connectors/trigger-postgresql.png new file mode 100644 index 000000000000..85cb06d9a5af Binary files /dev/null and b/articles/search/media/search-power-query-connectors/trigger-postgresql.png differ diff --git a/articles/search/search-api-preview.md b/articles/search/search-api-preview.md index 2dd0c8c1b808..193bb35ca412 100644 --- a/articles/search/search-api-preview.md +++ b/articles/search/search-api-preview.md @@ -8,7 +8,7 @@ author: HeidiSteen ms.author: heidist ms.service: cognitive-search ms.topic: conceptual -ms.date: 12/03/2021 +ms.date: 05/27/2022 --- # Preview features in Azure Cognitive Search @@ -23,7 +23,6 @@ Preview features that transition to general availability are removed from this l | [**Search REST API 2021-04-30-Preview**](/rest/api/searchservice/index-preview) | Security | Modifies [Create or Update Data Source](/rest/api/searchservice/preview-api/create-or-update-data-source) to support managed identities under Azure Active Directory, for indexers that connect to external data sources. | Public preview, [Search REST API 2021-04-30-Preview](/rest/api/searchservice/index-preview). Announced in May 2021. | | [**Management REST API 2021-04-01-Preview**](/rest/api/searchmanagement/) | Security | Modifies [Create or Update Service](/rest/api/searchmanagement/2021-04-01-preview/services/create-or-update) to support new [DataPlaneAuthOptions](/rest/api/searchmanagement/2021-04-01-preview/services/create-or-update#dataplaneauthoptions). | Public preview, [Management REST API](/rest/api/searchmanagement/), API version 2021-04-01-Preview. Announced in May 2021. | | [**Reset Documents**](search-howto-run-reset-indexers.md) | Indexer | Reprocesses individually selected search documents in indexer workloads. | Use the [Reset Documents REST API](/rest/api/searchservice/preview-api/reset-documents), API versions 2021-04-30-Preview or 2020-06-30-Preview. | -| [**Power Query connectors**](search-how-to-index-power-query-data-sources.md) | Indexer data source | Indexers can now index from other cloud platforms. If you are using an indexer to crawl external data sources for indexing, you can now use Power Query connectors to connect to Amazon Redshift, Elasticsearch, PostgreSQL, Salesforce Objects, Salesforce Reports, Smartsheet, and Snowflake. | [Sign up](https://aka.ms/azure-cognitive-search/indexer-preview) is required so that support can be enabled for your subscription on the backend. Configure this data source using [Create or Update Data Source](/rest/api/searchservice/preview-api/create-or-update-data-source), API versions 2021-04-30-Preview or 2020-06-30-Preview, or the Azure portal.| | [**SharePoint Indexer**](search-howto-index-sharepoint-online.md) | Indexer data source | New data source for indexer-based indexing of SharePoint content. | [Sign up](https://aka.ms/azure-cognitive-search/indexer-preview) is required so that support can be enabled for your subscription on the backend. Configure this data source using [Create or Update Data Source](/rest/api/searchservice/preview-api/create-or-update-data-source), API versions 2021-04-30-Preview or 2020-06-30-Preview, or the Azure portal. 
| | [**MySQL indexer data source**](search-howto-index-mysql.md) | Indexer data source | Index content and metadata from Azure MySQL data sources.| [Sign up](https://aka.ms/azure-cognitive-search/indexer-preview) is required so that support can be enabled for your subscription on the backend. Configure this data source using [Create or Update Data Source](/rest/api/searchservice/preview-api/create-or-update-data-source), API versions 2021-04-30-Preview or 2020-06-30-Preview, [.NET SDK 11.2.1](/dotnet/api/azure.search.documents.indexes.models.searchindexerdatasourcetype.mysql), and Azure portal. | | [**Cosmos DB indexer: MongoDB API, Gremlin API**](search-howto-index-cosmosdb.md) | Indexer data source | For Cosmos DB, SQL API is generally available, but MongoDB and Gremlin APIs are in preview. | For MongoDB and Gremlin, [sign up first](https://aka.ms/azure-cognitive-search/indexer-preview) so that support can be enabled for your subscription on the backend. MongoDB data sources can be configured in the portal. Configure this data source using [Create or Update Data Source](/rest/api/searchservice/preview-api/create-or-update-data-source), API versions 2021-04-30-Preview or 2020-06-30-Preview. | diff --git a/articles/search/search-data-sources-gallery.md b/articles/search/search-data-sources-gallery.md index a15628936150..cb85e8141e58 100644 --- a/articles/search/search-data-sources-gallery.md +++ b/articles/search/search-data-sources-gallery.md @@ -8,7 +8,7 @@ ms.author: heidist ms.service: cognitive-search ms.topic: conceptual layout: LandingPage -ms.date: 01/25/2022 +ms.date: 05/27/2022 --- @@ -18,7 +18,6 @@ Find a data connector from Microsoft or a partner to simplify data ingestion int + [Generally available data sources by Cognitive Search](#ga) + [Preview data sources by Cognitive Search](#preview) -+ [Power Query Connectors (preview)](#powerquery) + [Data sources from our Partners](#partners) @@ -241,146 +240,6 @@ Connect to Azure Storage through Azure Files share to extract content serialized --- - - -## Power Query Connectors (preview) - -Connect to data on other cloud platforms using indexers and a Power Query connector as the data source. [Sign up](https://aka.ms/azure-cognitive-search/indexer-preview) to get started. - -:::row::: -:::column span=""::: - ---- - -### Amazon Redshift - -Powered by [Power Query](/power-query/power-query-what-is-power-query) - -Connect to [Amazon Redshift](https://aws.amazon.com/redshift/) and extract searchable content for indexing in Cognitive Search. - -[More details](search-how-to-index-power-query-data-sources.md) - -:::column-end::: -:::column span=""::: - ---- - -### Elasticsearch - -Powered by [Power Query](/power-query/power-query-what-is-power-query) - -Connect to [Elasticsearch](https://www.elastic.co/elasticsearch) in the cloud and extract searchable content for indexing in Cognitive Search. - -[More details](search-how-to-index-power-query-data-sources.md) - -:::column-end::: -:::column span=""::: - ---- - -### PostgreSQL - -Powered by [Power Query](/power-query/power-query-what-is-power-query) - -Connect to a [PostgreSQL](https://www.postgresql.org/) database in the cloud and extract searchable content for indexing in Cognitive Search. 
- -[More details](search-how-to-index-power-query-data-sources.md) - -:::column-end::: -:::row-end::: -:::row::: -:::column span=""::: - - :::column-end::: - :::column span=""::: - :::column-end::: - -:::row-end::: - -:::row::: -:::column span=""::: - ---- - -### Salesforce Objects - -Powered by [Power Query](/power-query/power-query-what-is-power-query) - -Connect to Salesforce Objects and extract searchable content for indexing in Cognitive Search. - -[More details](search-how-to-index-power-query-data-sources.md) - -:::column-end::: -:::column span=""::: - ---- - -### Salesforce Reports - -Powered by [Power Query](/power-query/power-query-what-is-power-query) - -Connect to Salesforce Reports and extract searchable content for indexing in Cognitive Search. - -[More details](search-how-to-index-power-query-data-sources.md) - -:::column-end::: -:::column span=""::: - ---- - -### Smartsheet - -Powered by [Power Query](/power-query/power-query-what-is-power-query) - -Connect to Smartsheet and extract searchable content for indexing in Cognitive Search. - -[More details](search-how-to-index-power-query-data-sources.md) - -:::column-end::: -:::row-end::: -:::row::: -:::column span=""::: - - :::column-end::: - :::column span=""::: - :::column-end::: - -:::row-end::: - -:::row::: -:::column span=""::: - ---- - -### Snowflake - -Powered by [Power Query](/power-query/power-query-what-is-power-query) - -Extract searchable data and metadata from a Snowflake database and populate an index based on field-to-field mappings between the index and your data source. - -[More details](search-how-to-index-power-query-data-sources.md) - -:::column-end::: -:::column span=""::: - ---- - -:::column-end::: -:::column span=""::: - ---- - -:::column-end::: -:::row-end::: -:::row::: -:::column span=""::: - - :::column-end::: - :::column span=""::: - :::column-end::: - -:::row-end::: - ## Data sources from our Partners diff --git a/articles/search/search-get-started-arm.md b/articles/search/search-get-started-arm.md index 9300a225a1bc..78b5ebead146 100644 --- a/articles/search/search-get-started-arm.md +++ b/articles/search/search-get-started-arm.md @@ -8,7 +8,7 @@ ms.author: heidist ms.service: cognitive-search ms.topic: quickstart ms.custom: subject-armqs, mode-arm -ms.date: 05/16/2022 +ms.date: 05/25/2022 --- # Quickstart: Deploy Cognitive Search using an Azure Resource Manager template @@ -17,9 +17,9 @@ This article walks you through the process for using an Azure Resource Manager ( [!INCLUDE [About Azure Resource Manager](../../includes/resource-manager-quickstart-introduction.md)] -Only those properties included in the template are used in the deployment. If more customization is required, such as [setting up network security](search-security-overview.md#network-security), you can [update the service configuration](/cli/azure/search/service?view=azure-cli-latest#az-search-service-update) as a post-deployment task. +Only those properties included in the template are used in the deployment. If more customization is required, such as [setting up network security](search-security-overview.md#network-security), you can update the service as a post-deployment task. To customize an existing service with the fewest steps, use [Azure CLI](search-manage-azure-cli.md) or [Azure PowerShell](search-manage-powershell.md). If you're evaluating preview features, use the [Management REST API](search-manage-rest.md). 
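For reference, a post-deployment update through the Management REST API can be as small as a single PATCH request. The following Python sketch is illustrative only: the subscription, resource group, and service names are placeholders, it assumes the `azure-identity` and `requests` packages, and it requires an identity with rights to update the service.

```python
# Minimal post-deployment update sketch using the Management REST API.
# Assumes placeholder resource names and an identity with Contributor
# rights on the search service.
import requests
from azure.identity import DefaultAzureCredential

subscription_id = "<subscription-id>"    # placeholder
resource_group = "<resource-group>"      # placeholder
service_name = "<search-service-name>"   # placeholder

# Acquire an Azure Resource Manager token for the signed-in identity.
token = DefaultAzureCredential().get_token("https://management.azure.com/.default").token

url = (
    "https://management.azure.com"
    f"/subscriptions/{subscription_id}/resourceGroups/{resource_group}"
    f"/providers/Microsoft.Search/searchServices/{service_name}"
    "?api-version=2020-08-01"
)

# Example: lock down public network access after the template deployment.
body = {"properties": {"publicNetworkAccess": "disabled"}}

response = requests.patch(url, json=body, headers={"Authorization": f"Bearer {token}"})
response.raise_for_status()
print(response.json()["properties"]["publicNetworkAccess"])
```

The same `properties` payload shape applies to other settings that the management API exposes for post-deployment changes.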
-If your environment meets the prerequisites and you're familiar with using ARM templates, select the **Deploy to Azure** button. The template will open in the Azure portal. +Assuming your environment meets the prerequisites and you're familiar with using ARM templates, select the **Deploy to Azure** button. The template will open in the Azure portal. [![Deploy to Azure](../media/template-deployments/deploy-to-azure.svg)](https://portal.azure.com/#create/Microsoft.Template/uri/https%3A%2F%2Fraw.githubusercontent.com%2Fazure%2Fazure-quickstart-templates%2Fmaster%2Fquickstarts%2Fmicrosoft.search%2Fazure-search-create%2Fazuredeploy.json) diff --git a/articles/search/search-get-started-bicep.md b/articles/search/search-get-started-bicep.md index 9a62b2723bfd..76e531670afa 100644 --- a/articles/search/search-get-started-bicep.md +++ b/articles/search/search-get-started-bicep.md @@ -16,7 +16,7 @@ This article walks you through the process for using a Bicep file to deploy an A [!INCLUDE [About Bicep](../../includes/resource-manager-quickstart-bicep-introduction.md)] -Only those properties included in the template are used in the deployment. If more customization is required, such as [setting up network security](search-security-overview.md#network-security), you can [update the service configuration](/cli/azure/search/service?view=azure-cli-latest#az-search-service-update) as a post-deployment task. +Only those properties included in the template are used in the deployment. If more customization is required, such as [setting up network security](search-security-overview.md#network-security), you can update the service as a post-deployment task. To customize an existing service with the fewest steps, use [Azure CLI](search-manage-azure-cli.md) or [Azure PowerShell](search-manage-powershell.md). If you're evaluating preview features, use the [Management REST API](search-manage-rest.md). ## Prerequisites diff --git a/articles/search/search-how-to-index-power-query-data-sources.md b/articles/search/search-how-to-index-power-query-data-sources.md index cdffa04562ec..f0016a50e3a4 100644 --- a/articles/search/search-how-to-index-power-query-data-sources.md +++ b/articles/search/search-how-to-index-power-query-data-sources.md @@ -1,26 +1,540 @@ --- -title: Index data using Power Query connectors (preview) +title: Power Query connectors (preview - retired) titleSuffix: Azure Cognitive Search description: Import data from different data sources using the Power Query connectors. author: gmndrg ms.author: gimondra -manager: nitinme +manager: liamca ms.service: cognitive-search ms.topic: conceptual -ms.date: 12/17/2021 +ms.date: 05/27/2022 ms.custom: references_regions --- -# Index data using Power Query connectors (preview) +# Power Query connectors (preview - retired) > [!IMPORTANT] -> Power Query connector support is currently in a **gated public preview**. [Sign up](https://aka.ms/azure-cognitive-search/indexer-preview) to request access. +> Power Query connector support was introduced as a **gated public preview** under [Supplemental Terms of Use for Microsoft Azure Previews](https://azure.microsoft.com/support/legal/preview-supplemental-terms/), but is now discontinued. If you have a search solution that uses a Power Query connector, please migrate to an alternative solution. -If you are using an indexer to crawl external data sources for indexing, you can now use select [Power Query](/power-query/power-query-what-is-power-query) connectors for your data source connection in Azure Cognitive Search. 
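Both migration paths that follow include a step that re-creates your existing index schema in the target search service (see "Create a new index in Azure Cognitive Search" below). A minimal sketch of that call, using the [Create Index](/rest/api/searchservice/create-index) REST API from Python, is shown here; the service name, admin key, and the two-field schema are placeholders for illustration only.

```python
# Sketch of re-creating an index through the REST API. The service name,
# admin key, and this two-field schema are placeholders; paste the definition
# copied from the portal's "Index Definition (JSON)" view in its place.
import requests

service = "<search-service-name>"   # placeholder
api_key = "<admin-api-key>"         # placeholder

index_definition = {
    "name": "hotels-migrated",      # placeholder index name
    "fields": [
        {"name": "id", "type": "Edm.String", "key": True, "searchable": False},
        {"name": "description", "type": "Edm.String", "searchable": True},
    ],
}

response = requests.put(
    f"https://{service}.search.windows.net/indexes/{index_definition['name']}"
    "?api-version=2020-06-30",
    json=index_definition,
    headers={"api-key": api_key},
)
response.raise_for_status()
```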
+## Migrate by November 28, 2022 -Power Query connectors can reach a broader range of data sources, including those on other cloud providers. New data sources supported in this preview include: +The Power Query connector preview was announced in May 2021 and won't be moving forward into general availability. The following migration guidance is available for Snowflake and PostgreSQL. If you're using a different connector and need migration instructions, please use the email contact information provided in your preview sign-up to request help or open a ticket with Azure Support. + +## Prerequisites + +- An Azure Storage account. If you don't have one, [create a storage account](../storage/common/storage-account-create.md). +- An Azure Data Factory. If you don't have one, [create a Data Factory](../data-factory/quickstart-create-data-factory-portal.md). See [Data Factory Pipelines Pricing](https://azure.microsoft.com/pricing/details/data-factory/data-pipeline/) before implementation to understand the associated costs. Also, check [Data Factory pricing through examples](../data-factory/pricing-concepts.md). + +## Migrate a Snowflake data pipeline + +This section explains how to copy data from a Snowflake database to an [Azure Cognitive Search index](search-what-is-an-index.md). There's no process for directly indexing from Snowflake to Azure Cognitive Search, so this section includes a staging phase that copies database content to an Azure Storage blob container. You'll then index from that staging container using a [Data Factory pipeline](../data-factory/quickstart-create-data-factory-portal.md). + +### Step 1: Retrieve Snowflake database information + +1. Go to [Snowflake](https://app.snowflake.com/) and sign in to your Snowflake account. A Snowflake account URL looks like `https://<account_name>.snowflakecomputing.com`. + +1. Once you're signed in, collect the following information from the left pane. You'll use this information in the next step: + + - From **Data**, select **Databases** and copy the name of the database source. + - From **Admin**, select **Users & Roles** and copy the name of the user. Make sure the user has read permissions. + - From **Admin**, select **Accounts** and copy the **LOCATOR** value of the account. + - From the Snowflake URL, similar to `https://app.snowflake.com/<region_name>/xy12345/organization`, copy the region name. For example, in `https://app.snowflake.com/south-central-us.azure/xy12345/organization`, the region name is `south-central-us.azure`. + - From **Admin**, select **Warehouses** and copy the name of the warehouse associated with the database you'll use as the source. + +### Step 2: Configure Snowflake Linked Service + +1. Sign in to [Azure Data Factory Studio](https://ms-adf.azure.com/) with your Azure account. + +1. Select your data factory and then select **Continue**. + +1. From the left menu, select the **Manage** icon. + + :::image type="content" source="media/search-power-query-connectors/azure-data-factory-manage-icon.png" alt-text="Screenshot showing how to choose the Manage icon in Azure Data Factory to configure Snowflake Linked Service."::: + +1. Under **Linked services**, select **New**. + + :::image type="content" source="media/search-power-query-connectors/new-linked-service.png" alt-text="Screenshot showing how to choose New Linked Service in Azure Data Factory."::: + +1. On the right pane, in the data store search, enter "snowflake". Select the **Snowflake** tile and select **Continue**. 
+ + :::image type="content" source="media/search-power-query-connectors/snowflake-icon.png" alt-text="Screenshot showing how to choose Snowflake tile in new Linked Service data store." border="true"::: + +1. Fill out the **New linked service** form with the data you collected in the previous step. The **Account name** includes a **LOCATOR** value and the region (for example: `xy56789south-central-us.azure`). + + :::image type="content" source="media/search-power-query-connectors/new-linked-service-snowflake-form.png" alt-text="Screenshot showing how to fill out Snowflake Linked Service form."::: + +1. After the form is completed, select **Test connection**. + +1. If the test is successful, select **Create**. + +### Step 3: Configure Snowflake Dataset + +1. From the left menu, select the **Author** icon. + +1. Select **Datasets**, and then select the Datasets Actions ellipses menu (`...`). + + :::image type="content" source="media/search-power-query-connectors/author-datasets.png" alt-text="Screenshot showing how to choose the Author icon and datasets option."::: + +1. Select **New dataset**. + + :::image type="content" source="media/search-power-query-connectors/new-dataset.png" alt-text="Screenshot showing how to choose a new dataset in Azure Data Factory for Snowflake."::: + +1. On the right pane, in the data store search, enter "snowflake". Select the **Snowflake** tile and select **Continue**. + + :::image type="content" source="media/search-power-query-connectors/new-dataset-snowflake.png" alt-text="Screenshot showing how to choose Snowflake from data source for Dataset."::: + +1. In **Set Properties**: + - Select the Linked Service you created in [Step 2](#step-2-configure-snowflake-linked-service). + - Select the table that you would like to import, and then select **OK**. + + :::image type="content" source="media/search-power-query-connectors/set-snowflake-properties.png" alt-text="Screenshot showing how to configure dataset properties for Snowflake."::: + +1. Select **Save**. + +### Step 4: Create a new index in Azure Cognitive Search + +[Create a new index](/rest/api/searchservice/create-index) in your Azure Cognitive Search service with the same schema as the one you have currently configured for your Snowflake data. + +You can repurpose the index you're currently using for the Snowflake Power Connector. In the Azure portal, find the index and then select **Index Definition (JSON)**. Select the definition and copy it to the body of your new index request. + + :::image type="content" source="media/search-power-query-connectors/snowflake-index.png" alt-text="Screenshot showing how to copy existing Azure Cognitive Search index JSON configuration for existing Snowflake index."::: + +### Step 5: Configure Azure Cognitive Search Linked Service + +1. From the left menu, select **Manage** icon. + + :::image type="content" source="media/search-power-query-connectors/azure-data-factory-manage-icon.png" alt-text="Screenshot showing how to choose the Manage icon in Azure Data Factory to add a new linked service."::: + +1. Under **Linked services**, select **New**. + + :::image type="content" source="media/search-power-query-connectors/new-linked-service.png" alt-text="Screenshot showing how to choose New Linked Service in Azure Data Factory for Cognitive Search."::: + +1. On the right pane, in the data store search, enter "search". Select the **Azure Search** tile and select **Continue**. 
+ + :::image type="content" source="media/search-power-query-connectors/linked-service-search-new.png" alt-text="Screenshot showing how to choose New Linked Search in Azure Data Factory to import from Snowflake."::: + +1. Fill out the **New linked service** values: + + - Choose the Azure subscription where your Azure Cognitive Search service resides. + - Choose the Azure Cognitive Search service that has your Power Query connector indexer. + - Select **Create**. + + :::image type="content" source="media/search-power-query-connectors/new-linked-service-search.png" alt-text="Screenshot showing how to choose New Linked Search Service in Azure Data Factory with its properties to import from Snowflake."::: + +### Step 6: Configure Azure Cognitive Search Dataset + +1. From the left menu, select **Author** icon. + +1. Select **Datasets**, and then select the Datasets Actions ellipses menu (`...`). + + :::image type="content" source="media/search-power-query-connectors/author-datasets.png" alt-text="Screenshot showing how to choose the Author icon and datasets option for Cognitive Search."::: + +1. Select **New dataset**. + + :::image type="content" source="media/search-power-query-connectors/new-dataset.png" alt-text="Screenshot showing how to choose a new dataset in Azure Data Factory."::: + +1. On the right pane, in the data store search, enter "search". Select the **Azure Search** tile and select **Continue**. + + :::image type="content" source="media/search-power-query-connectors/new-dataset-search.png" alt-text="Screenshot showing how to choose an Azure Cognitive Search service for a Dataset in Azure Data Factory to use as sink."::: + +1. In **Set properties**: + - Select the Linked service recently created in [Step 5](#step-5-configure-azure-cognitive-search-linked-service). + - Choose the search index that you created in [Step 4](#step-4-create-a-new-index-in-azure-cognitive-search). + - Select **OK**. + + :::image type="content" source="media/search-power-query-connectors/set-search-snowflake-properties.png" alt-text="Screenshot showing how to choose New Search Linked Service in Azure Data Factory for Snowflake."::: + +1. Select **Save**. + +### Step 7: Configure Azure Blob Storage Linked Service + +1. From the left menu, select **Manage** icon. + + :::image type="content" source="media/search-power-query-connectors/azure-data-factory-manage-icon.png" alt-text="Screenshot showing how to choose the Manage icon in Azure Data Factory to link a new service."::: + +1. Under **Linked services**, select **New**. + + :::image type="content" source="media/search-power-query-connectors/new-linked-service.png" alt-text="Screenshot showing how to choose New Linked Service in Azure Data Factory to assign a storage account."::: + +1. On the right pane, in the data store search, enter "storage". Select the **Azure Blob Storage** tile and select **Continue**. + + :::image type="content" source="media/search-power-query-connectors/new-linked-service-blob.png" alt-text="Screenshot showing how to choose New Linked Blob Storage Service to use as sink for Snowflake in Azure Data Factory."::: + +1. Fill out the **New linked service** values: + + - Choose the Authentication type: SAS URI. Only this authentication type can be used to import data from Snowflake into Azure Blob Storage. + - [Generate a SAS URL](../cognitive-services/Translator/document-translation/create-sas-tokens.md) for the storage account you'll be using for staging. Paste the Blob SAS URL into the SAS URL field. + - Select **Create**. 
+ + :::image type="content" source="media/search-power-query-connectors/sas-url-storage-linked-service-snowflake.png" alt-text="Screenshot showing how to fill out New Linked Search Service form in Azure Data Factory with its properties to import from SnowFlake."::: + +### Step 8: Configure Storage dataset + +1. From the left menu, select **Author** icon. + +1. Select **Datasets**, and then select the Datasets Actions ellipses menu (`...`). + + :::image type="content" source="media/search-power-query-connectors/author-datasets.png" alt-text="Screenshot showing how to choose the Author icon and datasets option."::: + +1. Select **New dataset**. + + :::image type="content" source="media/search-power-query-connectors/new-dataset.png" alt-text="Screenshot showing how to choose a new dataset for storage in Azure Data Factory."::: + +1. On the right pane, in the data store search, enter "storage". Select the **Azure Blob Storage** tile and select **Continue**. + + :::image type="content" source="media/search-power-query-connectors/new-dataset-blob-storage.png" alt-text="Screenshot showing how to choose a new blob storage data store in Azure Data Factory for staging."::: + +1. Select **DelimitedText** format and select **Continue**. + +1. In **Set Properties**: + - Under **Linked service**, select the linked service created in [Step 7](#step-7-configure-azure-blob-storage-linked-service). + - Under **File path**, choose the container that will be the sink for the staging process and select **OK**. + + :::image type="content" source="media/search-power-query-connectors/set-delimited-text-properties.png" alt-text="Screenshot showing how to configure properties for storage dataset for Snowflake in Azure Data Factory."::: + + - In **Row delimiter**, select *Line feed (\n)*. + - Check **First row as a header** box. + - Select **Save**. + + :::image type="content" source="media/search-power-query-connectors/delimited-text-snowflake-save.png" alt-text="Screenshot showing how to save a DelimitedText configuration to be used as sink for Snowflake." border="true"::: + +### Step 9: Configure Pipeline + +1. From the left menu, select **Author** icon. + +1. Select **Pipelines**, and then select the Pipelines Actions ellipses menu (`...`). + + :::image type="content" source="media/search-power-query-connectors/author-pipelines.png" alt-text="Screenshot showing how to choose the Author icon and Pipelines option to configure Pipeline for Snowflake data transformation."::: + +1. Select **New pipeline**. + + :::image type="content" source="media/search-power-query-connectors/new-pipeline.png" alt-text="Screenshot showing how to choose a new Pipeline in Azure Data Factory to create for Snowflake data ingestion."::: + +1. Create and configure the [Data Factory activities](../data-factory/concepts-pipelines-activities.md) that copy from Snowflake to Azure Storage container: + + - Expand **Move & transform** section and drag and drop **Copy Data** activity to the blank pipeline editor canvas. + + :::image type="content" source="media/search-power-query-connectors/drag-and-drop-snowflake-copy-data.png" alt-text="Screenshot showing how to drag and drop a Copy data activity in Pipeline canvas to copy data from Snowflake."::: + + - Open the **General** tab. Accept the default values unless you need to customize the execution. + + - In the **Source** tab, select your Snowflake table. Leave the remaining options with the default values. 
+ + :::image type="content" source="media/search-power-query-connectors/source-snowflake.png" alt-text="Screenshot showing how to configure the Source in a pipeline to import data from Snowflake."::: + + - In the **Sink** tab: + + - Select *Storage DelimitedText* dataset created in [Step 8](#step-8-configure-storage-dataset). + - In **File Extension**, add *.csv*. + - Leave the remaining options with the default values. + + :::image type="content" source="media/search-power-query-connectors/delimited-text-sink.png" alt-text="Screenshot showing how to configure the sink in a Pipeline to move the data to Azure Storage from Snowflake."::: + + - Select **Save**. + +1. Configure the activities that copy from Azure Storage Blob to a search index: + + - Expand **Move & transform** section and drag and drop **Copy Data** activity to the blank pipeline editor canvas. + + :::image type="content" source="media/search-power-query-connectors/index-from-storage-activity.png" alt-text="Screenshot showing how to drag and drop a Copy data activity in Pipeline canvas to index from Storage."::: + + - In the **General** tab, accept the default values, unless you need to customize the execution. + + - In the **Source** tab: + + - Select *Storage DelimitedText* dataset created in [Step 8](#step-8-configure-storage-dataset). + - In the **File path type** select *Wildcard file path*. + - Leave all remaining fields with default values. + + :::image type="content" source="media/search-power-query-connectors/source-snowflake.png" alt-text="Screenshot showing how to configure the Source in a pipeline to import data from blob storage to Azure Cognitive Search index for staging phase."::: + + - In the **Sink** tab, select your Azure Cognitive Search index. Leave the remaining options with the default values. + + :::image type="content" source="media/search-power-query-connectors/search-sink.png" alt-text="Screenshot showing how to configure the Sink in a pipeline to import data from blob storage to Azure Cognitive Search index as final step from pipeline."::: + + - Select **Save**. + +### Step 10: Configure Activity order + +1. In the Pipeline canvas editor, select the little green square at the edge of the pipeline activity tile. Drag it to the "Indexes from Storage Account to Azure Cognitive Search" activity to set the execution order. + +1. Select **Save**. + + :::image type="content" source="media/search-power-query-connectors/pipeline-link-activities-snowflake-storage-index.png" alt-text="Screenshot showing how to link Pipeline activities to provide the order of execution for Snowflake."::: + +### Step 11: Add a Pipeline trigger + +1. Select [Add trigger](../data-factory/how-to-create-schedule-trigger.md) to schedule the pipeline run and select **New/Edit**. + + :::image type="content" source="media/search-power-query-connectors/add-pipeline-trigger.png" alt-text="Screenshot showing how to add a new trigger for a Pipeline in Data Factory to run for Snowflake." border="true"::: + +1. From the **Choose trigger** dropdown, select **New**. + + :::image type="content" source="media/search-power-query-connectors/choose-trigger-new.png" alt-text="Screenshot showing how to select adding a new trigger for a Pipeline in Data Factory for Snowflake."::: + +1. Review the trigger options to run the pipeline and select **OK**. + + :::image type="content" source="media/search-power-query-connectors/new-trigger.png" alt-text="Screenshot showing how to configure a trigger to run a Pipeline in Data Factory for Snowflake."::: + +1. 
Select **Save**. + +1. Select **Publish**. + + :::image type="content" source="media/search-power-query-connectors/publish-pipeline.png" alt-text="How to Publish a Pipeline in Data Factory for Snowflake ingestion to index." border="true"::: + +## Migrate a PostgreSQL data pipeline + +This section explains how to copy data from a PostgreSQL database to an [Azure Cognitive Search index](search-what-is-an-index.md). There's no process for directly indexing from PostgreSQL to Azure Cognitive Search, so this section includes a staging phase that copies database content to an Azure Storage blob container. You'll then index from that staging container using a [Data Factory pipeline](../data-factory/quickstart-create-data-factory-portal.md). + +### Step 1: Configure PostgreSQL Linked Service + +1. Sign in to [Azure Data Factory Studio](https://ms-adf.azure.com/) with your Azure account. + +1. Choose your Data Factory and select **Continue**. + +1. From the left menu, select the **Manage** icon. + + :::image type="content" source="media/search-power-query-connectors/azure-data-factory-manage-icon.png" alt-text="How to choose the Manage icon in Azure Data Factory."::: + +1. Under **Linked services**, select **New**. + + :::image type="content" source="media/search-power-query-connectors/new-linked-service.png" alt-text="Screenshot showing how to choose New Linked Service in Azure Data Factory."::: + +1. On the right pane, in the data store search, enter "postgresql". Select the **PostgreSQL** tile that represents where your PostgreSQL database is located (Azure or other) and select **Continue**. In this example, PostgreSQL database is located in Azure. + + :::image type="content" source="media/search-power-query-connectors/search-postgresql-data-store.png" alt-text="How to choose PostgreSQL data store for a Linked Service in Azure Data Factory."::: + +1. Fill out the **New linked service** values: + + - In **Account selection method**, select **Enter manually**. + - From your Azure Database for PostgreSQL Overview page in the [Azure portal](https://portal.azure.com/), paste the following values into their respective field: + - Add *Server name* to **Fully qualified domain name**. + - Add *Admin username* to **User name**. + - Add *Database* to **Database name**. + - Enter the Admin username password to **Username password**. + - Select **Create**. + + :::image type="content" source="media/search-power-query-connectors/new-linked-service-postgresql.png" alt-text="Choose the Manage icon in Azure Data Factory"::: + +### Step 2: Configure PostgreSQL Dataset + +1. From the left menu, select **Author** icon. + +1. Select **Datasets**, and then select the Datasets Actions ellipses menu (`...`). + + :::image type="content" source="media/search-power-query-connectors/author-datasets.png" alt-text="Screenshot showing how to choose the Author icon and datasets option."::: + +1. Select **New dataset**. + + :::image type="content" source="media/search-power-query-connectors/new-dataset.png" alt-text="Screenshot showing how to choose a new dataset in Azure Data Factory."::: + +1. On the right pane, in the data store search, enter "postgresql". Select the **Azure PostgreSQL** tile. Select **Continue**. + + :::image type="content" source="media/search-power-query-connectors/new-dataset-postgresql.png" alt-text="Screenshot showing how to choose PostgreSQL data store for a Dataset in Azure Data Factory." border="true"::: + +1. 
Fill out the **Set properties** values: + + - Choose the PostgreSQL Linked Service created in [Step 1](#step-1-configure-postgresql-linked-service). + - Select the table you would like to import/index. + - Select **OK**. + + :::image type="content" source="media/search-power-query-connectors/postgresql-set-properties.png" alt-text="Screenshot showing how to set PostgreSQL properties for dataset in Azure Data Factory."::: + +1. Select **Save**. + +### Step 3: Create a new index in Azure Cognitive Search + +[Create a new index](/rest/api/searchservice/create-index) in your Azure Cognitive Search service with the same schema as the one used for your PostgreSQL data. + +You can repurpose the index you're currently using for the PostgreSQL Power Connector. In the Azure portal, find the index and then select **Index Definition (JSON)**. Select the definition and copy it to the body of your new index request. + + :::image type="content" source="media/search-power-query-connectors/postgresql-index.png" alt-text="Screenshot showing how to copy existing Azure Cognitive Search index JSON configuration."::: + +### Step 4: Configure Azure Cognitive Search Linked Service + +1. From the left menu, select the **Manage** icon. + + :::image type="content" source="media/search-power-query-connectors/azure-data-factory-manage-icon.png" alt-text="Screenshot showing how to choose the Manage icon in Azure Data Factory to link a service."::: + +1. Under **Linked services**, select **New**. + + :::image type="content" source="media/search-power-query-connectors/new-linked-service.png" alt-text="Screenshot showing how to choose New Linked Service in Azure Data Factory."::: + +1. On the right pane, in the data store search, enter "search". Select **Azure Search** tile and select **Continue**. + + :::image type="content" source="media/search-power-query-connectors/linked-service-search-new.png" alt-text="Screenshot showing how to choose New Linked Search service in Azure Data Factory." border="true"::: + +1. Fill out the **New linked service** values: + + - Choose the Azure subscription where your Azure Cognitive Search service resides. + - Choose the Azure Cognitive Search service that has your Power Query connector indexer. + - Select **Create**. + + :::image type="content" source="media/search-power-query-connectors/new-linked-service-search.png" alt-text="Screenshot showing how to choose New Linked Search Service in Azure Data Factory with its properties to import from PostgreSQL."::: + +### Step 5: Configure Azure Cognitive Search Dataset + +1. From the left menu, select **Author** icon. + +1. Select **Datasets**, and then select the Datasets Actions ellipses menu (`...`). + + :::image type="content" source="media/search-power-query-connectors/author-datasets.png" alt-text="Screenshot showing how to choose the Author icon and datasets option."::: + +1. Select **New dataset**. + + :::image type="content" source="media/search-power-query-connectors/new-dataset.png" alt-text="Screenshot showing how to choose a new dataset in Azure Data Factory."::: + +1. On the right pane, in the data store search, enter "search". Select the **Azure Search** tile and select **Continue**. + + :::image type="content" source="media/search-power-query-connectors/new-dataset-search.png" alt-text="Screenshot showing how to choose an Azure Cognitive Search service for a Dataset in Azure Data Factory."::: + +1. 
In **Set properties**: + + - Select the Linked service created for Azure Cognitive Search in [Step 4](#step-4-configure-azure-cognitive-search-linked-service). + - Choose the index that you created as part of [Step 3](#step-3-create-a-new-index-in-azure-cognitive-search). + - Select **OK**. + + :::image type="content" source="media/search-power-query-connectors/set-search-postgresql-properties.png" alt-text="Screenshot showing how to fill out Set Properties for search dataset."::: + +1. Select **Save**. + +### Step 6: Configure Azure Blob Storage Linked Service + +1. From the left menu, select **Manage** icon. + + :::image type="content" source="media/search-power-query-connectors/azure-data-factory-manage-icon.png" alt-text="Screenshot showing how to choose the Manage icon in Azure Data Factory to link a service."::: + +1. Under **Linked services**, select **New**. + + :::image type="content" source="media/search-power-query-connectors/new-linked-service.png" alt-text="Screenshot showing how to choose New Linked Service in Azure Data Factory."::: + +1. On the right pane, in the data store search, enter "storage". Select the **Azure Blob Storage** tile and select **Continue**. + + :::image type="content" source="media/search-power-query-connectors/new-linked-service-blob.png" alt-text="Screenshot showing how to choose a new data store"::: + +1. Fill out the **New linked service** values: + + - Choose the **Authentication type**: *SAS URI*. Only this method can be used to import data from PostgreSQL into Azure Blob Storage. + - [Generate a SAS URL](../cognitive-services/Translator/document-translation/create-sas-tokens.md) for the storage account you will be using for staging and copy the Blob SAS URL to SAS URL field. + - Select **Create**. + + :::image type="content" source="media/search-power-query-connectors/sas-url-storage-linked-service-postgresql.png" alt-text="Screenshot showing how to fill out New Linked Search Service form in Azure Data Factory with its properties to import from PostgreSQL."::: + +### Step 7: Configure Storage dataset + +1. From the left menu, select **Author** icon. + +1. Select **Datasets**, and then select the Datasets Actions ellipses menu (`...`). + + :::image type="content" source="media/search-power-query-connectors/author-datasets.png" alt-text="Screenshot showing how to choose the Author icon and datasets option."::: + +1. Select **New dataset**. + + :::image type="content" source="media/search-power-query-connectors/new-dataset.png" alt-text="Screenshot showing how to choose a new dataset in Azure Data Factory."::: + +1. On the right pane, in the data store search, enter "storage". Select the **Azure Blob Storage** tile and select **Continue**. + + :::image type="content" source="media/search-power-query-connectors/new-dataset-blob-storage.png" alt-text="Screenshot showing how to choose a new blob storage data store in Azure Data Factory."::: + +1. Select **DelimitedText** format and select **Continue**. + +1. In **Row delimiter**, select *Line feed (\n)*. + +1. Check **First row as a header** box. + +1. Select **Save**. + + :::image type="content" source="media/search-power-query-connectors/delimited-text-save-postgresql.png" alt-text="Screenshot showing options to import data to Azure Storage blob." border="true"::: + +### Step 8: Configure Pipeline + +1. From the left menu, select **Author** icon. + +1. Select **Pipelines**, and then select the Pipelines Actions ellipses menu (`...`). 
+ + :::image type="content" source="media/search-power-query-connectors/author-pipelines.png" alt-text="Screenshot showing how to choose the Author icon and Pipelines option."::: + +1. Select **New pipeline**. + + :::image type="content" source="media/search-power-query-connectors/new-pipeline.png" alt-text="Screenshot showing how to choose a new Pipeline in Azure Data Factory."::: + +1. Create and configure the [Data Factory activities](../data-factory/concepts-pipelines-activities.md) that copy from PostgreSQL to Azure Storage container. + + - Expand **Move & transform** section and drag and drop **Copy Data** activity to the blank pipeline editor canvas. + + :::image type="content" source="media/search-power-query-connectors/postgresql-pipeline-general.png" alt-text="Screenshot showing how to drag and drop in Azure Data Factory to copy data from PostgreSQL." border="true"::: + + - Open the **General** tab, accept the default values, unless you need to customize the execution. + + - In the **Source** tab, select your PostgreSQL table. Leave the remaining options with the default values. + + :::image type="content" source="media/search-power-query-connectors/source-postgresql.png" alt-text="Screenshot showing how to configure Source to import data from PostgreSQL into Azure Storage blob in staging phase." border="true"::: + + - In the **Sink** tab: + - Select the Storage DelimitedText PostgreSQL dataset configured in [Step 7](#step-7-configure-storage-dataset). + - In **File Extension**, add *.csv* + - Leave the remaining options with the default values. + + :::image type="content" source="media/search-power-query-connectors/sink-storage-postgresql.png" alt-text="Screenshot showing how to configure sink to import data from PostgreSQL into Azure Storage blob." border="true"::: + + - Select **Save**. + +1. Configure the activities that copy from Azure Storage to a search index: + + - Expand **Move & transform** section and drag and drop **Copy Data** activity to the blank pipeline editor canvas. + + :::image type="content" source="media/search-power-query-connectors/index-from-storage-activity-postgresql.png" alt-text="Screenshot showing how to drag and drop in Azure Data Factory to configure a copy activity." border="true"::: + + - In the **General** tab, leave the default values, unless you need to customize the execution. + + - In the **Source** tab: + - Select the Storage source dataset configured in [Step 7](#step-7-configure-storage-dataset). + - In the **File path type** field, select *Wildcard file path*. + - Leave all remaining fields with default values. + + :::image type="content" source="media/search-power-query-connectors/source-storage-postgresql.png" alt-text="Screenshot showing how to configure Source for indexing from Storage to Azure Cognitive Search index." border="true"::: + + - In the **Sink** tab, select your Azure Cognitive Search index. Leave the remaining options with the default values. + + :::image type="content" source="media/search-power-query-connectors/sink-search-index-postgresql.png" alt-text="Screenshot showing how to configure Sink for indexing from Storage to Azure Cognitive Search index." border="true"::: + + - Select **Save**. + +### Step 9: Configure Activity order + +1. In the Pipeline canvas editor, select the little green square at the edge of the pipeline activity. Drag it to the "Indexes from Storage Account to Azure Cognitive Search" activity to set the execution order. + +1. Select **Save**. 
+ + :::image type="content" source="media/search-power-query-connectors/pipeline-link-acitivities-postgresql.png" alt-text="Screenshot showing how to configure activity order in the pipeline for proper execution." border="true"::: + +### Step 10: Add a Pipeline trigger + +1. Select [Add trigger](../data-factory/how-to-create-schedule-trigger.md) to schedule the pipeline run and select **New/Edit**. + + :::image type="content" source="media/search-power-query-connectors/add-pipeline-trigger-postgresql.png" alt-text="Screenshot showing how to add a new trigger for a Pipeline in Data Factory." border="true"::: + +1. From the **Choose trigger** dropdown, select **New**. + + :::image type="content" source="media/search-power-query-connectors/choose-trigger-new.png" alt-text="Screenshot showing how to select adding a new trigger for a Pipeline in Data Factory." border="true"::: + +1. Review the trigger options to run the pipeline and select **OK**. + + :::image type="content" source="media/search-power-query-connectors/trigger-postgresql.png" alt-text="Screenshot showing how to configure a trigger to run a Pipeline in Data Factory." border="true"::: + +1. Select **Save**. + +1. Select **Publish**. + + :::image type="content" source="media/search-power-query-connectors/publish-pipeline-postgresql.png" alt-text="Screenshot showing how to Publish a Pipeline in Data Factory for PostgreSQL data copy." border="true"::: + +## Legacy content for Power Query connector preview + +A Power Query connector is used with a search indexer to automate data ingestion from various data sources, including those on other cloud providers. It uses [Power Query](/power-query/power-query-what-is-power-query) to retrieve the data. + +Data sources supported in the preview include: + Amazon Redshift + Elasticsearch @@ -30,18 +544,13 @@ Power Query connectors can reach a broader range of data sources, including thos + Smartsheet + Snowflake -This article shows you an Azure portal-based approach for setting up an indexer using Power Query connectors. Currently, there is no SDK support. - -> [!NOTE] -> Preview functionality is provided under [Supplemental Terms of Use for Microsoft Azure Previews](https://azure.microsoft.com/support/legal/preview-supplemental-terms/) and is not recommended for production workloads. - -## Supported functionality +### Supported functionality Power Query connectors are used in indexers. An indexer in Azure Cognitive Search is a crawler that extracts searchable data and metadata from an external data source and populates an index based on field-to-field mappings between the index and your data source. This approach is sometimes referred to as a 'pull model' because the service pulls data in without you having to write any code that adds data to an index. Indexers provide a convenient way for users to index content from their data source without having to write their own crawler or push model. Indexers that reference Power Query data sources have the same level of support for skillsets, schedules, high water mark change detection logic, and most parameters that other indexers support. -## Prerequisites +### Prerequisites Before you start pulling data from one of the supported data sources, you'll want to make sure you have all your resources set up. @@ -51,7 +560,7 @@ Before you start pulling data from one of the supported data sources, you'll wan + Azure Blob Storage account, used as an intermediary for your data. The data will flow from your data source, then to Blob Storage, then to the index. 
This requirement only exists with the initial gated preview. -## Regional availability +### Regional availability The preview is only available on search services in the following regions: @@ -66,15 +575,15 @@ The preview is only available on search services in the following regions: + West US + West US 2 -## Preview limitations +### Preview limitations -There is a lot to be excited about with this preview, but there are a few limitations. This section describes the limitations that are specific to the current version of the preview. +This section describes the limitations that are specific to the current version of the preview. -+ Pulling binary data from your data source is not supported in this version of the preview. ++ Pulling binary data from your data source isn't supported. -+ [Debug sessions](cognitive-search-debug-session.md) are not supported at this time. ++ [Debug session](cognitive-search-debug-session.md) isn't supported. -## Getting started using the Azure portal +### Getting started using the Azure portal The Azure portal provides support for the Power Query connectors. By sampling data and reading metadata on the container, the Import data wizard in Azure Cognitive Search can create a default index, map source fields to target index fields, and load the index in a single operation. Depending on the size and complexity of source data, you could have an operational full text search index in minutes. @@ -82,44 +591,44 @@ The Azure portal provides support for the Power Query connectors. By sampling da > [!VIDEO https://www.youtube.com/embed/uy-l4xFX1EE] -### Step 1 – Prepare source data +#### Step 1 – Prepare source data Make sure your data source contains data. The Import data wizard reads metadata and performs data sampling to infer an index schema, but it also loads data from your data source. If the data is missing, the wizard will stop and return and error. -### Step 2 – Start Import data wizard +#### Step 2 – Start Import data wizard -After you're approved for the preview, the Azure Cognitive Search team will provide you with an Azure portal link that uses a feature flag so that you can access the Power Query connectors. Open this page and start the start the wizard from the command bar in the Azure Cognitive Search service page by selecting **Import data**. +After you're approved for the preview, the Azure Cognitive Search team will provide you with an Azure portal link that uses a feature flag so that you can access the Power Query connectors. Open this page and start the wizard from the command bar in the Azure Cognitive Search service page by selecting **Import data**. :::image type="content" source="media/search-import-data-portal/import-data-cmd.png" alt-text="Screenshot of the Import data command" border="true"::: -### Step 3 – Select your data source +#### Step 3 – Select your data source There are a few data sources that you can pull data from using this preview. All data sources that use Power Query will include a "Powered By Power Query" on their tile. Select your data source. :::image type="content" source="media/search-power-query-connectors/power-query-import-data.png" alt-text="Screenshot of the Select a data source page." border="true"::: -Once you've selected your data source, select **Next: Configure your data** to move to the next section. +After you've selected your data source, select **Next: Configure your data** to move to the next section. 
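Behind the scenes, the wizard creates the same indexer objects you could define yourself through the REST API: a data source, an index, and an indexer. For reference, the following Python sketch shows the general shape of the data source and indexer calls using the generally available `azureblob` type; the Power Query-specific data source types were only exposed through the gated preview, and every name, key, and connection string below is a placeholder. The connection details for your selected Power Query source are configured in the next step.

```python
# Sketch of the objects the Import data wizard creates on your behalf,
# shown with the generally available azureblob data source type. All names,
# the connection string, and the target index are placeholders.
import requests

service = "<search-service-name>"   # placeholder
api_key = "<admin-api-key>"         # placeholder
headers = {"api-key": api_key}
base = f"https://{service}.search.windows.net"
api_version = "api-version=2020-06-30"

data_source = {
    "name": "staging-blob-ds",                                    # placeholder
    "type": "azureblob",
    "credentials": {"connectionString": "<storage-connection-string>"},
    "container": {"name": "staging-container"},                   # placeholder
}
resp = requests.post(f"{base}/datasources?{api_version}", json=data_source, headers=headers)
resp.raise_for_status()

indexer = {
    "name": "staging-blob-indexer",     # placeholder
    "dataSourceName": "staging-blob-ds",
    "targetIndexName": "hotels-migrated",   # placeholder for an existing index
}
resp = requests.post(f"{base}/indexers?{api_version}", json=indexer, headers=headers)
resp.raise_for_status()
```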
-### Step 4 – Configure your data +#### Step 4 – Configure your data -Once you've selected your data source, you'll configure your connection. Each data source will require different information. For a few data sources, the Power Query documentation provides additional details on how to connect to your data. +In this step, you'll configure your connection. Each data source will require different information. For a few data sources, the Power Query documentation provides more detail on how to connect to your data. + [PostgreSQL](/power-query/connectors/postgresql) + [Salesforce Objects](/power-query/connectors/salesforceobjects) + [Salesforce Reports](/power-query/connectors/salesforcereports) -Once you've provided your connection credentials, select **Next**. +After you've provided your connection credentials, select **Next**. -### Step 5 – Select your data +#### Step 5 – Select your data -The import wizard will preview various tables that are available in your data source. In this step you'll check one table that contains the data you want to import into your index. +The import wizard will preview various tables that are available in your data source. In this step, you'll check one table that contains the data you want to import into your index. :::image type="content" source="media/search-power-query-connectors/power-query-preview-data.png" alt-text="Screenshot of data preview." border="true"::: Once you've selected your table, select **Next**. -### Step 6 – Transform your data (Optional) +#### Step 6 – Transform your data (Optional) Power Query connectors provide you with a rich UI experience that allows you to manipulate your data so you can send the right data to your index. You can remove columns, filter rows, and much more. @@ -129,9 +638,9 @@ It's not required that you transform your data before importing it into Azure Co For more information about transforming data with Power Query, look at [Using Power Query in Power BI Desktop](/power-query/power-query-quickstart-using-power-bi). -Once you're done transforming your data, select **Next**. +After data is transformed, select **Next**. -### Step 7 – Add Azure Blob storage +#### Step 7 – Add Azure Blob storage The Power Query connector preview currently requires you to provide a blob storage account. This step only exists with the initial gated preview. This blob storage account will serve as temporary storage for data that moves from your data source to an Azure Cognitive Search index. @@ -145,17 +654,17 @@ You can get the connection string from the Azure portal by navigating to the sto After you've provided a data source name and connection string, select “Next: Add cognitive skills (Optional)”. -### Step 8 – Add cognitive skills (Optional) +#### Step 8 – Add cognitive skills (Optional) [AI enrichment](cognitive-search-concept-intro.md) is an extension of indexers that can be used to make your content more searchable. -This is an optional step for this preview. When complete, select **Next: Customize target index**. +You can add any enrichments that add benefit to your scenario. When complete, select **Next: Customize target index**. -### Step 9 – Customize target index +#### Step 9 – Customize target index On the Index page, you should see a list of fields with a data type and a series of checkboxes for setting index attributes. The wizard can generate a fields list based on metadata and by sampling the source data. -You can bulk-select attributes by clicking the checkbox at the top of an attribute column. 
Choose Retrievable and Searchable for every field that should be returned to a client app and subject to full text search processing. You'll notice that integers are not full text or fuzzy searchable (numbers are evaluated verbatim and are often useful in filters). +You can bulk-select attributes by selecting the checkbox at the top of an attribute column. Choose Retrievable and Searchable for every field that should be returned to a client app and subject to full text search processing. You'll notice that integers aren't full text or fuzzy searchable (numbers are evaluated verbatim and are often useful in filters). Review the description of index attributes and language analyzers for more information. @@ -165,7 +674,7 @@ Take a moment to review your selections. Once you run the wizard, physical data When complete, select **Next: Create an Indexer**. -### Step 10 – Create an indexer +#### Step 10 – Create an indexer The last step creates the indexer. Naming the indexer allows it to exist as a standalone resource, which you can schedule and manage independently of the index and data source object, created in the same wizard sequence. @@ -175,24 +684,26 @@ When creating the indexer, you can optionally choose to run the indexer on a sch :::image type="content" source="media/search-power-query-connectors/power-query-indexer-configuration.png" alt-text="Screenshot of Create your indexer page." border="true"::: -Once you've finished filling out this page select **Submit**. +After you've finished filling out this page select **Submit**. -## High Water Mark Change Detection policy +### High Water Mark Change Detection policy This change detection policy relies on a "high water mark" column capturing the version or time when a row was last updated. -### Requirements +#### Requirements + All inserts specify a value for the column. + All updates to an item also change the value of the column. + The value of this column increases with each insert or update. -## Unsupported column names +### Unsupported column names -Field names in an Azure Cognitive Search index have to meet certain requirements. One of these requirements is that some characters such as "/" are not allowed. If a column name in your database does not meet these requirements, the index schema detection will not recognize your column as a valid field name and you won't see that column listed as a suggested field for your index. Normally, using [field mappings](search-indexer-field-mappings.md) would solve this problem but field mappings are not supported in the portal. +Field names in an Azure Cognitive Search index have to meet certain requirements. One of these requirements is that some characters such as "/" aren't allowed. If a column name in your database does not meet these requirements, the index schema detection won't recognize your column as a valid field name and you won't see that column listed as a suggested field for your index. Normally, using [field mappings](search-indexer-field-mappings.md) would solve this problem but field mappings aren't supported in the portal. To index content from a column in your table that has an unsupported field name, rename the column during the "Transform your data" phase of the import data process. For example, you can rename a column named "Billing code/Zip code" to "zipcode". By renaming the column, the index schema detection will recognize it as a valid field name and add it as a suggestion to your index definition. 
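Outside of the portal, a field mapping in an indexer definition is the usual way to route a source column with an unsupported name to a valid index field. The following Python sketch shows that shape against placeholder data source, index, and indexer names, reusing the "Billing code/Zip code" example above; it isn't specific to a Power Query data source.

```python
# Sketch of a field mapping defined outside the portal. The indexer, data
# source, and index names are placeholders; the source column reuses the
# "Billing code/Zip code" example from the section above.
import requests

service = "<search-service-name>"   # placeholder
api_key = "<admin-api-key>"         # placeholder

indexer = {
    "name": "staging-blob-indexer",        # placeholder
    "dataSourceName": "staging-blob-ds",   # placeholder
    "targetIndexName": "hotels-migrated",  # placeholder
    "fieldMappings": [
        {"sourceFieldName": "Billing code/Zip code", "targetFieldName": "zipcode"}
    ],
}

response = requests.put(
    f"https://{service}.search.windows.net/indexers/{indexer['name']}"
    "?api-version=2020-06-30",
    json=indexer,
    headers={"api-key": api_key},
)
response.raise_for_status()
```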
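Similarly, the High Water Mark Change Detection policy described earlier is expressed on the data source definition. The following sketch shows where `highWaterMarkColumnName` fits, using a placeholder Azure SQL data source, table, and column; the column you choose must meet the requirements listed in that section.

```python
# Sketch of a high water mark change detection policy on a data source.
# The data source name, connection string, table, and column are placeholders.
import requests

service = "<search-service-name>"   # placeholder
api_key = "<admin-api-key>"         # placeholder

data_source = {
    "name": "orders-sql-ds",                                   # placeholder
    "type": "azuresql",
    "credentials": {"connectionString": "<sql-connection-string>"},
    "container": {"name": "Orders"},                           # placeholder table
    "dataChangeDetectionPolicy": {
        "@odata.type": "#Microsoft.Azure.Search.HighWaterMarkChangeDetectionPolicy",
        "highWaterMarkColumnName": "LastUpdated",   # must increase on every insert or update
    },
}

response = requests.put(
    f"https://{service}.search.windows.net/datasources/{data_source['name']}"
    "?api-version=2020-06-30",
    json=data_source,
    headers={"api-key": api_key},
)
response.raise_for_status()
```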
## Next steps -You have learned how to pull data from new data sources using the Power Query connectors. To learn more about indexers, see [Indexers in Azure Cognitive Search](search-indexer-overview.md). \ No newline at end of file +This article explained how to pull data using the Power Query connectors. Because this preview feature is discontinued, it also explains how to migrate existing solutions to a supported scenario. + +To learn more about indexers, see [Indexers in Azure Cognitive Search](search-indexer-overview.md). diff --git a/articles/search/search-howto-managed-identities-data-sources.md b/articles/search/search-howto-managed-identities-data-sources.md index 3b275c411946..2c241a57beec 100644 --- a/articles/search/search-howto-managed-identities-data-sources.md +++ b/articles/search/search-howto-managed-identities-data-sources.md @@ -168,7 +168,7 @@ You can use the Management REST API instead of the portal to assign a user-assig If your Azure resource is behind a firewall, make sure there's an inbound rule that admits requests from your search service. -+ For same-region connections to Azure Blob Storage or Azure Data Lake Storage Gen2, use a system managed identity and the [trusted service exception](search-indexer-howto-access-trusted-service-exception.md). Optionally, you can configure a [resource instance rule (preview)](../storage/common/storage-network-security.md#grant-access-from-azure-resource-instances-preview) to admit requests. ++ For same-region connections to Azure Blob Storage or Azure Data Lake Storage Gen2, use a system managed identity and the [trusted service exception](search-indexer-howto-access-trusted-service-exception.md). Optionally, you can configure a [resource instance rule](../storage/common/storage-network-security.md#grant-access-from-azure-resource-instances) to admit requests. + For all other resources and connections, [configure an IP firewall rule](search-indexer-howto-access-ip-restricted.md) that admits requests from Search. See [Indexer access to content protected by Azure network security features](search-indexer-securing-resources.md) for details. diff --git a/articles/search/search-howto-managed-identities-storage.md b/articles/search/search-howto-managed-identities-storage.md index e8a1eabebdc8..1c5654110852 100644 --- a/articles/search/search-howto-managed-identities-storage.md +++ b/articles/search/search-howto-managed-identities-storage.md @@ -30,7 +30,7 @@ This article assumes familiarity with indexer concepts and configuration. If you For a code example in C#, see [Index Data Lake Gen2 using Azure AD](https://github.com/Azure-Samples/azure-search-dotnet-samples/blob/master/data-lake-gen2-acl-indexing/README.md) on GitHub. > [!NOTE] -> If storage is network-protected and in the same region as your search service, you must use a system-assigned managed identity and either one of the following network options: [connect as a trusted service](search-indexer-howto-access-trusted-service-exception.md), or [connect using the resource instance rule (preview)](../storage/common/storage-network-security.md#grant-access-from-azure-resource-instances-preview). 
+> If storage is network-protected and in the same region as your search service, you must use a system-assigned managed identity and either one of the following network options: [connect as a trusted service](search-indexer-howto-access-trusted-service-exception.md), or [connect using the resource instance rule](../storage/common/storage-network-security.md#grant-access-from-azure-resource-instances). ## Prerequisites diff --git a/articles/search/search-indexer-howto-access-ip-restricted.md b/articles/search/search-indexer-howto-access-ip-restricted.md index edbca62897da..e1910ed19090 100644 --- a/articles/search/search-indexer-howto-access-ip-restricted.md +++ b/articles/search/search-indexer-howto-access-ip-restricted.md @@ -18,7 +18,7 @@ On behalf of an indexer, a search service will issue outbound calls to an extern This article explains how to find the IP address of your search service and configure an inbound IP rule on an Azure Storage account. While specific to Azure Storage, this approach also works for other Azure resources that use IP firewall rules for data access, such as Cosmos DB and Azure SQL. > [!NOTE] -> A storage account and your search service must be in different regions if you want to define IP firewall rules. If your setup doesn't permit this, try the [trusted service exception](search-indexer-howto-access-trusted-service-exception.md) or [resource instance rule](../storage/common/storage-network-security.md#grant-access-from-azure-resource-instances-preview) instead. +> A storage account and your search service must be in different regions if you want to define IP firewall rules. If your setup doesn't permit this, try the [trusted service exception](search-indexer-howto-access-trusted-service-exception.md) or [resource instance rule](../storage/common/storage-network-security.md#grant-access-from-azure-resource-instances) instead. ## Get a search service IP address diff --git a/articles/search/search-indexer-securing-resources.md b/articles/search/search-indexer-securing-resources.md index b8d790eaba35..a5575c94e29d 100644 --- a/articles/search/search-indexer-securing-resources.md +++ b/articles/search/search-indexer-securing-resources.md @@ -64,7 +64,7 @@ There are two options for supporting data access using the system identity: - Configure search to run as a [trusted service](search-indexer-howto-access-trusted-service-exception.md) and use the [trusted service exception](../storage/common/storage-network-security.md#trusted-access-based-on-a-managed-identity) in Azure Storage. -- Configure a [resource instance rule (preview)](../storage/common/storage-network-security.md#grant-access-from-azure-resource-instances-preview) in Azure Storage that admits inbound requests from an Azure resource. +- Configure a [resource instance rule](../storage/common/storage-network-security.md#grant-access-from-azure-resource-instances) in Azure Storage that admits inbound requests from an Azure resource. The above options depend on Azure Active Directory for authentication, which means that the connection must be made with an Azure AD login. Currently, only a Cognitive Search [system-assigned managed identity](search-howto-managed-identities-data-sources.md#create-a-system-managed-identity) is supported for same-region connections through a firewall. 
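As a rough illustration of the resource instance rule option, the following Python sketch uses the `azure-mgmt-storage` management SDK to register a search service as an allowed resource instance on a storage account. The subscription, resource group, account, tenant, and search service IDs are placeholders, and the `resource_access_rules` property assumes a recent version of the storage management SDK; the portal, an ARM template, or the Azure CLI can apply the same rule.

```python
from azure.identity import DefaultAzureCredential
from azure.mgmt.storage import StorageManagementClient
from azure.mgmt.storage.models import (
    NetworkRuleSet,
    ResourceAccessRule,
    StorageAccountUpdateParameters,
)

client = StorageManagementClient(DefaultAzureCredential(), "<subscription-id>")

search_service_id = (
    "/subscriptions/<subscription-id>/resourceGroups/<search-rg>"
    "/providers/Microsoft.Search/searchServices/<search-service-name>"
)

# Note: this sketch replaces the network rule set for brevity. In practice,
# read the existing rule set first and append the new rule so that current
# IP and virtual network rules are preserved.
client.storage_accounts.update(
    resource_group_name="<storage-rg>",
    account_name="<storage-account-name>",
    parameters=StorageAccountUpdateParameters(
        network_rule_set=NetworkRuleSet(
            default_action="Deny",
            resource_access_rules=[
                ResourceAccessRule(
                    tenant_id="<tenant-id>",
                    resource_id=search_service_id,
                )
            ],
        )
    ),
)
```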
diff --git a/articles/search/search-what-is-data-import.md b/articles/search/search-what-is-data-import.md index 7ecfc14cc59b..5d3bc611d44f 100644 --- a/articles/search/search-what-is-data-import.md +++ b/articles/search/search-what-is-data-import.md @@ -71,7 +71,6 @@ The pull model crawls a supported data source and automatically uploads the data + [Azure Cosmos DB](search-howto-index-cosmosdb.md) + [Azure SQL Database, SQL Managed Instance, and SQL Server on Azure VMs](search-howto-connecting-azure-sql-database-to-azure-search-using-indexers.md) + [SharePoint in Microsoft 365 (preview)](search-howto-index-sharepoint-online.md) -+ [Power Query data connectors (preview)](search-how-to-index-power-query-data-sources.md) Indexers connect an index to a data source (usually a table, view, or equivalent structure), and map source fields to equivalent fields in the index. During execution, the rowset is automatically transformed to JSON and loaded into the specified index. All indexers support schedules so that you can specify how frequently the data is to be refreshed. Most indexers provide change tracking if the data source supports it. By tracking changes and deletes to existing documents in addition to recognizing new documents, indexers remove the need to actively manage the data in your index. diff --git a/articles/search/whats-new.md b/articles/search/whats-new.md index fdd02fff0707..65e64a4bd413 100644 --- a/articles/search/whats-new.md +++ b/articles/search/whats-new.md @@ -7,13 +7,19 @@ author: HeidiSteen ms.author: heidist ms.service: cognitive-search ms.topic: overview -ms.date: 03/17/2022 +ms.date: 05/27/2022 ms.custom: references_regions --- # What's new in Azure Cognitive Search Learn what's new in the service. Bookmark this page to keep up to date with service updates. Check out the [**Preview feature list**](search-api-preview.md) for an itemized list of features that are not yet approved for production workloads. +## May 2022 + +|Feature                         | Description | Availability | +|------------------------------------|--------------|---------------| +| [Power Query connector preview](search-how-to-index-power-query-data-sources.md) | This indexer data source was introduced in May 2021 but will not be moving forward. Please migrate your data ingestion code by November 2022. See the feature documentation for migration guidance. | Retired | + ## February 2022 |Feature                         | Description | Availability | diff --git a/articles/security/develop/secure-develop.md b/articles/security/develop/secure-develop.md index ad86796be3d6..ac2300f6392c 100644 --- a/articles/security/develop/secure-develop.md +++ b/articles/security/develop/secure-develop.md @@ -112,15 +112,6 @@ The verification phase involves a comprehensive effort to ensure that the code m You scan your application and its dependent libraries to identify any known vulnerable components. Products that are available to perform this scan include [OWASP Dependency Check](https://www.owasp.org/index.php/OWASP_Dependency_Check),[Snyk](https://snyk.io/), and [Black Duck](https://www.blackducksoftware.com/). -Vulnerability scanning powered by [Tinfoil Security](https://www.tinfoilsecurity.com/) is available for Azure App Service Web Apps. 
[Tinfoil Security scanning through App Service](https://azure.microsoft.com/blog/web-vulnerability-scanning-for-azure-app-service-powered-by-tinfoil-security/) offers developers and administrators a fast, integrated, and economical means of discovering and addressing vulnerabilities before a malicious actor can take advantage of them. - -> [!NOTE] -> You can also [integrate Tinfoil Security with Azure AD](../../active-directory/saas-apps/tinfoil-security-tutorial.md). Integrating Tinfoil Security with Azure AD provides you with the -following benefits: -> - In Azure AD, you can control who has access to Tinfoil Security. -> - Your users can be automatically signed in to Tinfoil Security (single sign-on) by using their Azure AD accounts. -> - You can manage your accounts in a single, central location, the Azure portal. - ### Test your application in an operating state Dynamic application security testing (DAST) is a process of testing an application in an operating state to find security vulnerabilities. DAST tools analyze programs while they are executing to find security vulnerabilities such as memory corruption, insecure server configuration, cross-site scripting, user privilege issues, SQL injection, and other critical security concerns. diff --git a/articles/security/fundamentals/encryption-atrest.md b/articles/security/fundamentals/encryption-atrest.md index 4bc496480c02..13582e9aa46d 100644 --- a/articles/security/fundamentals/encryption-atrest.md +++ b/articles/security/fundamentals/encryption-atrest.md @@ -76,7 +76,7 @@ Resource providers and application instances store the encrypted Data Encryption Microsoft Cloud services are used in all three cloud models: IaaS, PaaS, SaaS. Below you have examples of how they fit on each model: -- Software services, referred to as Software as a Server or SaaS, which have applications provided by the cloud such as Microsoft 365. +- Software services, referred to as Software as a Service or SaaS, which have applications provided by the cloud such as Microsoft 365. - Platform services in which customers use the cloud for things like storage, analytics, and service bus functionality in their applications. - Infrastructure services, or Infrastructure as a Service (IaaS) in which customer deploys operating systems and applications that are hosted in the cloud and possibly leveraging other cloud services. diff --git a/articles/security/fundamentals/overview.md b/articles/security/fundamentals/overview.md index bbca5bb3c0fb..0532ddb6b30e 100644 --- a/articles/security/fundamentals/overview.md +++ b/articles/security/fundamentals/overview.md @@ -92,10 +92,6 @@ Azure Monitor logs can be a useful tool in forensic and other security analysis, The section provides additional information regarding key features in application security and summary information about these capabilities. -### Web Application vulnerability scanning - -One of the easiest ways to get started with testing for vulnerabilities on your [App Service app](../../app-service/overview.md) is to use the [integration with Tinfoil Security](https://azure.microsoft.com/blog/web-vulnerability-scanning-for-azure-app-service-powered-by-tinfoil-security/) to perform one-click vulnerability scanning on your app. You can view the test results in an easy-to-understand report, and learn how to fix each vulnerability with step-by-step instructions. 
- ### Penetration Testing We don’t perform [penetration testing](./pen-testing.md) of your application for you, but we do understand that you want and need to perform testing on your own applications. That’s a good thing, because when you enhance the security of your applications you help make the entire Azure ecosystem more secure. While notifying Microsoft of pen testing activities is no longer required customers must still comply with the [Microsoft Cloud Penetration Testing Rules of Engagement](https://www.microsoft.com/msrc/pentest-rules-of-engagement). diff --git a/articles/security/fundamentals/technical-capabilities.md b/articles/security/fundamentals/technical-capabilities.md index 2e4b5ba564c8..3221886c8368 100644 --- a/articles/security/fundamentals/technical-capabilities.md +++ b/articles/security/fundamentals/technical-capabilities.md @@ -245,8 +245,6 @@ Azure also provides several easy-to-use features to help secure both inbound and - [Restrict access to your app by client's behavior - request frequency and concurrency](http://microsoftazurewebsitescheatsheet.info/#dynamic-ip-restrictions) -- [Scan your web app code for vulnerabilities using Tinfoil Security Scanning](https://azure.microsoft.com/blog/web-vulnerability-scanning-for-azure-app-service-powered-by-tinfoil-security/) - - [Configure TLS mutual authentication to require client certificates to connect to your web app](../../app-service/app-service-web-configure-tls-mutual-auth.md) - [Configure a client certificate for use from your app to securely connect to external resources](https://azure.microsoft.com/blog/using-certificates-in-azure-websites-applications/) diff --git a/articles/sentinel/design-your-workspace-architecture.md b/articles/sentinel/design-your-workspace-architecture.md index 13c72b469a36..6053dfa4997a 100644 --- a/articles/sentinel/design-your-workspace-architecture.md +++ b/articles/sentinel/design-your-workspace-architecture.md @@ -190,9 +190,9 @@ However, this recommendation for separate workspaces for non-SOC data comes from When planning to use resource-context or table level RBAC, consider the following information: -- [Decision tree note #7](#decision-tree): To configure resource-context RBAC for non-Azure resources, you may want to associate a Resource ID to the data when sending to Microsoft Sentinel, so that the permission can be scoped using resource-context RBAC. For more information, see [Explicitly configure resource-context RBAC](resource-context-rbac.md#explicitly-configure-resource-context-rbac) and [Access modes by deployment](../azure-monitor/logs/design-logs-deployment.md). +- [Decision tree note #7](#decision-tree): To configure resource-context RBAC for non-Azure resources, you may want to associate a Resource ID to the data when sending to Microsoft Sentinel, so that the permission can be scoped using resource-context RBAC. For more information, see [Explicitly configure resource-context RBAC](resource-context-rbac.md#explicitly-configure-resource-context-rbac) and [Access modes by deployment](../azure-monitor/logs/workspace-design.md). -- [Decision tree note #8](#decision-tree): [Resource permissions](../azure-monitor/logs/manage-access.md) or [resource-context](../azure-monitor/logs/design-logs-deployment.md) allows users to view logs only for resources that they have access to. The workspace access mode must be set to **User resource or workspace permissions**. 
Only tables relevant to the resources where the user has permissions will be included in search results from the **Logs** page in Microsoft Sentinel. +- [Decision tree note #8](#decision-tree): [Resource permissions](../azure-monitor/logs/manage-access.md) or [resource-context](../azure-monitor/logs/workspace-design.md) allows users to view logs only for resources that they have access to. The workspace access mode must be set to **User resource or workspace permissions**. Only tables relevant to the resources where the user has permissions will be included in search results from the **Logs** page in Microsoft Sentinel. - [Decision tree note #9](#decision-tree): [Table-level RBAC](../azure-monitor/logs/manage-access.md) allows you to define more granular control to data in a Log Analytics workspace in addition to the other permissions. This control allows you to define specific data types that are accessible only to a specific set of users. For more information, see [Table-level RBAC in Microsoft Sentinel](https://techcommunity.microsoft.com/t5/azure-sentinel/table-level-rbac-in-azure-sentinel/ba-p/965043). diff --git a/articles/sentinel/dns-normalization-schema.md b/articles/sentinel/dns-normalization-schema.md index 3b0881260c8c..a15950ba34c8 100644 --- a/articles/sentinel/dns-normalization-schema.md +++ b/articles/sentinel/dns-normalization-schema.md @@ -58,7 +58,7 @@ If your data source supports full DNS logging and you've chosen to log multiple For example, you might modify your query with the following normalization: ```kql -_Im_DNS | where SrcIpAddr != "127.0.0.1" and EventSubType == "response" +_Im_Dns | where SrcIpAddr != "127.0.0.1" and EventSubType == "response" ``` ## Parsers @@ -107,7 +107,7 @@ _Im_Dns (responsecodename = 'NXDOMAIN', starttime = ago(1d), endtime=now()) To filter only DNS queries for a specified list of domain names, use: ```kql -let torProxies=dynamic(["tor2web.org", "tor2web.com", "torlink.co",...]); +let torProxies=dynamic(["tor2web.org", "tor2web.com", "torlink.co"]); _Im_Dns (domain_has_any = torProxies) ``` > [!TIP] diff --git a/articles/sentinel/network-normalization-schema.md b/articles/sentinel/network-normalization-schema.md index 24a8756ff72a..0b9b9966c2a8 100644 --- a/articles/sentinel/network-normalization-schema.md +++ b/articles/sentinel/network-normalization-schema.md @@ -71,7 +71,7 @@ The following filtering parameters are available: For example, to filter only network sessions for a specified list of domain names, use: ```kql -let torProxies=dynamic(["tor2web.org", "tor2web.com", "torlink.co",...]); +let torProxies=dynamic(["tor2web.org", "tor2web.com", "torlink.co"]); _Im_NetworkSession (hostname_has_any = torProxies) ``` diff --git a/articles/sentinel/normalization-about-schemas.md b/articles/sentinel/normalization-about-schemas.md index 6cd2e040c782..2144cb54fc9b 100644 --- a/articles/sentinel/normalization-about-schemas.md +++ b/articles/sentinel/normalization-about-schemas.md @@ -214,7 +214,7 @@ The allowed values for a device ID type are: | **VectraId** | A Vectra AI assigned resource ID.| | **Other** | An ID type not listed above.| -For example, the Azure Monitor [VM Insights solution](/azure/azure-monitor/vm/vminsights-log-search) provides network sessions information in the `VMConnection`. The table provides an Azure Resource ID in the `_ResourceId` field and a VM insights specific device ID in the `Machine` field. 
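If you want to run one of these normalized queries outside the Microsoft Sentinel portal, for example from a notebook or a scheduled job, a minimal sketch with the `azure-monitor-query` Python library looks like the following. The workspace ID is a placeholder, and the query assumes the ASIM `_Im_NetworkSession` parsers are deployed to that workspace.

```python
from datetime import timedelta

from azure.identity import DefaultAzureCredential
from azure.monitor.query import LogsQueryClient

client = LogsQueryClient(DefaultAzureCredential())

# Same source-agnostic filtering as the KQL snippets above: the parser
# parameters filter the data before the normalized results are returned.
query = """
let torProxies = dynamic(["tor2web.org", "tor2web.com", "torlink.co"]);
_Im_NetworkSession (hostname_has_any = torProxies)
| summarize Sessions = count() by SrcIpAddr, DstHostname
"""

response = client.query_workspace(
    workspace_id="<log-analytics-workspace-id>",
    query=query,
    timespan=timedelta(days=1),
)

for table in response.tables:
    for row in table.rows:
        print(dict(zip(table.columns, row)))
```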
Use the following mapping to represent those IDs: +For example, the Azure Monitor [VM Insights solution](../azure-monitor/vm/vminsights-log-search.md) provides network sessions information in the `VMConnection`. The table provides an Azure Resource ID in the `_ResourceId` field and a VM insights specific device ID in the `Machine` field. Use the following mapping to represent those IDs: | Field | Map to | | ----- | ----- | diff --git a/articles/sentinel/playbook-triggers-actions.md b/articles/sentinel/playbook-triggers-actions.md index 757971315cf3..41cba46011d8 100644 --- a/articles/sentinel/playbook-triggers-actions.md +++ b/articles/sentinel/playbook-triggers-actions.md @@ -114,7 +114,7 @@ Basic playbook to send incident details over mail: The **Entities** dynamic field is an array of JSON objects, each of which represents an entity. Each entity type has its own schema, depending on its unique properties. -The **"Entities - Get \"** action allows you to do the following: +The **"Entities - Get \"** action allows you to do the following: - Filter the array of entities by the requested type. - Parse the specific fields of this type, so they can be used as dynamic fields in further actions. diff --git a/articles/sentinel/process-events-normalization-schema.md b/articles/sentinel/process-events-normalization-schema.md index c15831276f3a..8e168f99cf74 100644 --- a/articles/sentinel/process-events-normalization-schema.md +++ b/articles/sentinel/process-events-normalization-schema.md @@ -98,7 +98,7 @@ The following list mentions fields that have specific guidelines for process act | Field | Class | Type | Description | |---------------------|-------------|------------|--------------------| | **EventType** | Mandatory | Enumerated | Describes the operation reported by the record.

    For Process records, supported values include:
    - `ProcessCreated`
    - `ProcessTerminated` | -| **EventSchemaVersion** | Mandatory | String | The version of the schema. The version of the schema documented here is `0.1.2` | +| **EventSchemaVersion** | Mandatory | String | The version of the schema. The version of the schema documented here is `0.1.3` | | **EventSchema** | Optional | String | The name of the schema documented here is `ProcessEvent`. | | **Dvc** fields| | | For process activity events, device fields refer to the system on which the process was executed. | @@ -190,7 +190,7 @@ The process event schema references the following entities, which are central to | **ParentProcessFileVersion** | Optional | String | The product version from the version information in parent process image file.

    Example: `7.9.5.0` | | **ParentProcessIsHidden** | Optional | Boolean | An indication of whether the parent process is in hidden mode. | | **ParentProcessInjectedAddress** | Optional | String | The memory address in which the responsible parent process is stored. | -| **ParentProcessId**| Mandatory | String | The process ID (PID) of the parent process.

    Example: `48610176` | +| **ParentProcessId**| Recommended | String | The process ID (PID) of the parent process.

    Example: `48610176` | | **ParentProcessGuid** | Optional | String | A generated unique identifier (GUID) of the parent process. Enables identifying the process across systems.

    Example: `EF3BD0BD-2B74-60C5-AF5C-010000001E00` | | **ParentProcessIntegrityLevel** | Optional | String | Every process has an integrity level that is represented in its token. Integrity levels determine the process level of protection or access.

    Windows defines the following integrity levels: **low**, **medium**, **high**, and **system**. Standard users receive a **medium** integrity level and elevated users receive a **high** integrity level.

    For more information, see [Mandatory Integrity Control - Win32 apps](/windows/win32/secauthz/mandatory-integrity-control). | | **ParentProcessMD5** | Optional | MD5 | The MD5 hash of the parent process image file.

    Example: `75a599802f1fa166cdadb360960b1dd0`| @@ -237,7 +237,7 @@ The process event schema references the following entities, which are central to | **HashType** | Recommended | String | The type of hash stored in the HASH alias field, allowed values are `MD5`, `SHA`, `SHA256`, `SHA512` and `IMPHASH`. | | **TargetProcessCommandLine** | Mandatory | String | The command line used to run the target process.

    Example: `"choco.exe" -v` | | **TargetProcessCurrentDirectory** | Optional | String | The current directory in which the target process is executed.

    Example: `c:\windows\system32` | -| **TargetProcessCreationTime** | Mandatory | DateTime | The product version from the version information of the target process image file. | +| **TargetProcessCreationTime** | Recommended | DateTime | The date and time when the target process was created. | | **TargetProcessId**| Mandatory | String | The process ID (PID) of the target process.

    Example: `48610176`

    **Note**: The type is defined as *string* to support varying systems, but on Windows and Linux this value must be numeric.

    If you are using a Windows or Linux machine and used a different type, make sure to convert the values. For example, if you used a hexadecimal value, convert it to a decimal value. | | **TargetProcessGuid** | Optional | String |A generated unique identifier (GUID) of the target process. Enables identifying the process across systems.

    Example: `EF3BD0BD-2B74-60C5-AF5C-010000001E00` | | **TargetProcessIntegrityLevel** | Optional | String | Every process has an integrity level that is represented in its token. Integrity levels determine the process level of protection or access.

    Windows defines the following integrity levels: **low**, **medium**, **high**, and **system**. Standard users receive a **medium** integrity level and elevated users receive a **high** integrity level.

    For more information, see [Mandatory Integrity Control - Win32 apps](/windows/win32/secauthz/mandatory-integrity-control). | @@ -252,6 +252,10 @@ These are the changes in version 0.1.1 of the schema: These are the changes in version 0.1.2 of the schema - Added the fields `ActorUserType`, `ActorOriginalUserType`, `TargetUserType`, `TargetOriginalUserType`, and `HashType`. +These are the changes in version 0.1.3 of the schema + +- Changed the fields `ParentProcessId` and `TargetProcessCreationTime` from mandatory to recommended. + ## Next steps For more information, see: diff --git a/articles/sentinel/quickstart-onboard.md b/articles/sentinel/quickstart-onboard.md index b286dbbbdc8c..3ed9dd0cf6cb 100644 --- a/articles/sentinel/quickstart-onboard.md +++ b/articles/sentinel/quickstart-onboard.md @@ -26,7 +26,7 @@ After you connect your data sources, choose from a gallery of expertly created w - **Active Azure Subscription**. If you don't have one, create a [free account](https://azure.microsoft.com/free/?WT.mc_id=A261C142F) before you begin. -- **Log Analytics workspace**. Learn how to [create a Log Analytics workspace](../azure-monitor/logs/quick-create-workspace.md). For more information about Log Analytics workspaces, see [Designing your Azure Monitor Logs deployment](../azure-monitor/logs/design-logs-deployment.md). +- **Log Analytics workspace**. Learn how to [create a Log Analytics workspace](../azure-monitor/logs/quick-create-workspace.md). For more information about Log Analytics workspaces, see [Designing your Azure Monitor Logs deployment](../azure-monitor/logs/workspace-design.md). By default, you may have a default of [30 days retention](../azure-monitor/logs/cost-logs.md#legacy-pricing-tiers) in the Log Analytics workspace used for Microsoft Sentinel. To make sure that you can use the full extent of Microsoft Sentinel functionality, raise this to 90 days. For more information, see [Configure data retention and archive policies in Azure Monitor Logs](../azure-monitor/logs/data-retention-archive.md). diff --git a/articles/sentinel/roles.md b/articles/sentinel/roles.md index d71a1ec9a97e..586688c43aa5 100644 --- a/articles/sentinel/roles.md +++ b/articles/sentinel/roles.md @@ -94,7 +94,7 @@ Consult the [Role recommendations](#role-recommendations) section for best pract - **Log Analytics RBAC**. You can use the Log Analytics advanced Azure role-based access control across the data in your Microsoft Sentinel workspace. This includes both data type-based Azure RBAC and resource-context Azure RBAC. 
For more information, see: - - [Manage log data and workspaces in Azure Monitor](../azure-monitor/logs/manage-access.md#manage-access-using-workspace-permissions) + - [Manage log data and workspaces in Azure Monitor](../azure-monitor/logs/manage-access.md#azure-rbac) - [Resource-context RBAC for Microsoft Sentinel](resource-context-rbac.md) - [Table-level RBAC](https://techcommunity.microsoft.com/t5/azure-sentinel/table-level-rbac-in-azure-sentinel/ba-p/965043) diff --git a/articles/sentinel/web-normalization-schema.md b/articles/sentinel/web-normalization-schema.md index 63b6a822c455..5e29a8432ef4 100644 --- a/articles/sentinel/web-normalization-schema.md +++ b/articles/sentinel/web-normalization-schema.md @@ -87,7 +87,7 @@ The following filtering parameters are available: For example, to filter only Web sessions for a specified list of domain names, use: ```kql -let torProxies=dynamic(["tor2web.org", "tor2web.com", "torlink.co",...]); +let torProxies=dynamic(["tor2web.org", "tor2web.com", "torlink.co"]); _Im_WebSession (url_has_any = torProxies) ``` diff --git a/articles/service-connector/how-to-integrate-signalr.md b/articles/service-connector/how-to-integrate-signalr.md index fc1437212221..436d57c8ea97 100644 --- a/articles/service-connector/how-to-integrate-signalr.md +++ b/articles/service-connector/how-to-integrate-signalr.md @@ -1,23 +1,26 @@ --- title: Integrate Azure SignalR Service with Service Connector -description: Integrate Azure SignalR Service into your application with Service Connector +description: Integrate Azure SignalR Service into your application with Service Connector. Learn about authentication types and client types of Azure SignalR Service. author: shizn ms.author: xshi ms.service: service-connector ms.topic: how-to -ms.date: 10/29/2021 -ms.custom: ignite-fall-2021, event-tier1-build-2022 +ms.date: 5/25/2022 +ms.custom: +- ignite-fall-2021 +- kr2b-contr-experiment +- event-tier1-build-2022 --- # Integrate Azure SignalR Service with Service Connector -This page shows the supported authentication types and client types of Azure SignalR Service using Service Connector. You might still be able to connect to Azure SignalR Service in other programming languages without using Service Connector. This page also shows default environment variable name and value (or Spring Boot configuration) you get when you create the service connection. You can learn more about [Service Connector environment variable naming convention](concept-service-connector-internals.md). +This article shows the supported authentication types and client types of Azure SignalR Service using Service Connector. This article also shows default environment variable name and value or Spring Boot configuration that you get when you create the service connection. For more information, see [Service Connector environment variable naming convention](concept-service-connector-internals.md). 
## Supported compute service - Azure App Service -## Supported Authentication types and client types +## Supported authentication types and client types | Client Type | System-assigned Managed Identity | User-assigned Managed Identity | Secret/ConnectionString | Service Principal | | --- | --- | --- | --- | --- | @@ -27,33 +30,31 @@ This page shows the supported authentication types and client types of Azure Sig ### .NET -**Secret/ConnectionString** +- Secret/ConnectionString -| Default environment variable name | Description | Example value | -| --- | --- | --- | -| AZURE_SIGNALR_CONNECTIONSTRING | SignalR Service connection string | `Endpoint=https://{signalrName}.service.signalr.net;AccessKey={};Version=1.0;` | + | Default environment variable name | Description | Example value | + | --- | --- | --- | + | AZURE_SIGNALR_CONNECTIONSTRING | SignalR Service connection string | `Endpoint=https://{signalrName}.service.signalr.net;AccessKey={};Version=1.0;` | -**System-assigned Managed Identity** +- System-assigned Managed Identity -| Default environment variable name | Description | Example value | -| --- | --- | --- | -| AZURE_SIGNALR_CONNECTIONSTRING | SignalR Service connection string with Managed Identity | `Endpoint=https://{signalrName}.service.signalr.net;AuthType=aad;ClientId={};Version=1.0;` | + | Default environment variable name | Description | Example value | + | --- | --- | --- | + | AZURE_SIGNALR_CONNECTIONSTRING | SignalR Service connection string with Managed Identity | `Endpoint=https://{signalrName}.service.signalr.net;AuthType=aad;ClientId={};Version=1.0;` | -**User-assigned Managed Identity** +- User-assigned Managed Identity -| Default environment variable name | Description | Example value | -| --- | --- | --- | -| AZURE_SIGNALR_CONNECTIONSTRING | SignalR Service connection string with Managed Identity | `Endpoint=https://{signalrName}.service.signalr.net;AuthType=aad;ClientId={};Version=1.0;` | + | Default environment variable name | Description | Example value | + | --- | --- | --- | + | AZURE_SIGNALR_CONNECTIONSTRING | SignalR Service connection string with Managed Identity | `Endpoint=https://{signalrName}.service.signalr.net;AuthType=aad;ClientId={};Version=1.0;` | -**Service Principal** +- Service Principal -| Default environment variable name | Description | Example value | -| --- | --- | --- | -| AZURE_SIGNALR_CONNECTIONSTRING | SignalR Service connection string with Service Principal | `Endpoint=https://{signalrName}.service.signalr.net;AuthType=aad;ClientId={};ClientSecret={};TenantId={};Version=1.0;` | + | Default environment variable name | Description | Example value | + | --- | --- | --- | + | AZURE_SIGNALR_CONNECTIONSTRING | SignalR Service connection string with Service Principal | `Endpoint=https://{signalrName}.service.signalr.net;AuthType=aad;ClientId={};ClientSecret={};TenantId={};Version=1.0;` | ## Next steps -Follow the tutorials listed below to learn more about Service Connector. 
- > [!div class="nextstepaction"] > [Learn about Service Connector concepts](./concept-service-connector-internals.md) diff --git a/articles/service-connector/how-to-troubleshoot-front-end-error.md index 110f98894e3e..7ee0bb4aaa1e 100644 --- a/articles/service-connector/how-to-troubleshoot-front-end-error.md +++ b/articles/service-connector/how-to-troubleshoot-front-end-error.md @@ -1,36 +1,42 @@ --- -title: Service Connector Troubleshooting Guidance -description: Error list and suggested actions of Service Connector +title: Service Connector troubleshooting guidance +description: This article lists Service Connector error messages and suggested actions that you can use to troubleshoot issues. author: shizn ms.author: xshi ms.service: service-connector -ms.custom: event-tier1-build-2022 -ms.topic: how-to -ms.date: 05/03/2022 +ms.topic: troubleshooting +ms.date: 5/25/2022 +ms.custom: +- ignite-fall-2021 +- kr2b-contr-experiment +- event-tier1-build-2022 --- # How to troubleshoot with Service Connector -If you come across an issue, you can refer to the error message to find suggested actions or fixes. This how-to guide shows you several options to troubleshoot Service Connector. +This article lists Service Connector error messages and suggested actions to help you troubleshoot them. -## Troubleshooting from the Azure portal +## Error messages and suggested actions from the Azure portal | Error message | Suggested Action | | --- | --- | -| Unknown resource type |
    • Check source and target resource to verify whether the service types are supported by Service Connector.
    • Check whether the specified source-target connection combination is supported by Service Connector.
    | -| Unknown resource type |
    • Check whether the target resource exists.
    • Check the correctness of the target resource ID.
    | -| Unsupported resource |
    • Check whether the authentication type is supported by the specified source-target connection combination.
    | +| Unknown resource type | Check the source and target resources to verify whether the service types are supported by Service Connector. | +| | Check whether the specified source-target connection combination is supported by Service Connector. | +| | Check whether the target resource exists. | +| | Check the correctness of the target resource ID. | +| Unsupported resource | Check whether the authentication type is supported by the specified source-target connection combination. | -### Troubleshooting using the Azure CLI - -#### InvalidArgumentValueError +## Error types, error messages, and suggested actions when using the Azure CLI +### InvalidArgumentValueError | Error message | Suggested Action | | --- | --- | -| The source resource ID is invalid: `{SourceId}` |
    • Check whether the source resource ID supported by Service Connector.
    • Check the correctness of source resource ID.
    | -| Target resource ID is invalid: `{TargetId}` |
    • Check whether the target service type is supported by Service Connector.
    • Check the correctness of target resource ID.
    | -| Connection ID is invalid: `{ConnectionId}` |
    • Check the correctness of the connection ID.
    | +| The source resource ID is invalid: `{SourceId}` | Check whether the source resource ID is supported by Service Connector. | +| | Check the correctness of source resource ID. | +| Target resource ID is invalid: `{TargetId}` | Check whether the target service type is supported by Service Connector. | +| | Check the correctness of target resource ID. | +| Connection ID is invalid: `{ConnectionId}` | Check the correctness of the connection ID. | #### RequiredArgumentMissingError @@ -38,19 +44,19 @@ If you come across an issue, you can refer to the error message to find suggeste | Error message | Suggested Action | | --- | --- | | `{Argument}` shouldn't be blank | User should provide argument value for interactive input | -| Required keys missing for parameter `{Parameter}`. All possible keys are: `{Keys}` | Provide value for the auth info parameter, usually in the form of `--param key1=val1 key2=val2`. | +| Required keys missing for parameter `{Parameter}`. All possible keys are: `{Keys}` | Provide a value for the authentication information parameter, usually in the form of `--param key1=val1 key2=val2`. | | Required argument is missing, please provide the arguments: `{Arguments}` | Provide the required argument. | #### ValidationError | Error message | Suggested Action | | --- | --- | -| Only one auth info is needed | User can only provide one auth info parameter. Check whether auth info is missing or multiple auth info parameters are provided. | -| Auth info argument should be provided when updating the connection: `{ConnectionName}` | When you update a secret type connection, auth info parameter should be provided. This error occurs because user's secret cannot be accessed through the ARM API. -| Either client type or auth info should be specified to update | When you update a connection, either client type or auth info should be provided. | -| Usage error: {} [KEY=VALUE ...] | Check the available keys and provide values for the auth info parameter, usually in the form of `--param key1=val1 key2=val2`. | -| Unsupported Key `{Key}` is provided for parameter `{Parameter}`. All possible keys are: `{Keys}` | Check the available keys and provide values for the auth info parameter, usually in the form of `--param key1=val1 key2=val2`. | -| Provision failed, please create the target resource manually and then create the connection. Error details: `{ErrorTrace}` |
    • Retry.
    • Create the target resource manually and then create the connection.
    | +| Only one auth info is needed | User can only provide one authentication information parameter. Check whether it isn't provided or multiple parameters are provided. | +| Auth info argument should be provided when updating the connection: `{ConnectionName}` | The authentication information should be provided when updating a secret type connection. This error occurs because a user's secret can't be accessed through the Azure Resource Manager API. | +| Either client type or auth info should be specified to update | Either client type or authentication information should be provided when updating a connection. | +| Usage error: `{} [KEY=VALUE ...]` | Check the available keys and provide values for the auth info parameter, usually in the form of `--param key1=val1 key2=val2`. | +| Unsupported Key `{Key}` is provided for parameter `{Parameter}`. All possible keys are: `{Keys}` | Check the available keys and provide values for the authentication information parameter, usually in the form of `--param key1=val1 key2=val2`. | +| Provision failed, please create the target resource manually and then create the connection. Error details: `{ErrorTrace}` | Retry. Create the target resource manually and then create the connection. | ## Next steps diff --git a/articles/service-connector/index.yml b/articles/service-connector/index.yml index ac91ac609243..1a89fda75a99 100644 --- a/articles/service-connector/index.yml +++ b/articles/service-connector/index.yml @@ -9,9 +9,9 @@ metadata: ms.service: service-connector ms.custom: event-tier1-build-2022 ms.topic: landing-page - author: shizn - ms.author: xshi - ms.date: 10/29/2021 + author: maud-lv + ms.author: malev + ms.date: 05/24/2022 # linkListType: architecture | concept | deploy | download | get-started | how-to-guide | learn | overview | quickstart | reference | sample | tutorial | video | whats-new landingContent: diff --git a/articles/service-connector/overview.md b/articles/service-connector/overview.md index 54d877596d9c..3681acacb1c8 100644 --- a/articles/service-connector/overview.md +++ b/articles/service-connector/overview.md @@ -6,12 +6,12 @@ ms.author: xshi ms.service: service-connector ms.custom: event-tier1-build-2022 ms.topic: overview -ms.date: 05/03/2022 +ms.date: 05/24/2022 --- # What is Service Connector? -The Service Connector service helps you connect Azure compute services to other backing services. This service configures the network settings and connection information (for example, generating environment variables) between compute services and target backing services in management plane. Developers use their preferred SDK or library that consumes the connection information to do data plane operations against the target backing service. +Service Connector helps you connect Azure compute services to other backing services. This service configures the network settings and connection information (for example, generating environment variables) between compute services and target backing services in management plane. Developers use their preferred SDK or library that consumes the connection information to do data plane operations against the target backing service. This article provides an overview of Service Connector. @@ -26,9 +26,9 @@ Any application that runs on Azure compute services and requires a backing servi See [what services are supported in Service Connector](#what-services-are-supported-in-service-connector) to see more supported services and application patterns. -## What are the benefits using Service Connector? 
+## What are the benefits to using Service Connector? -**Connect to target backing service with just a single command or a few clicks:** +**Connect to a target backing service with just a single command or a few clicks:** Service Connector is designed for your ease of use. To create a connection, you'll need three required parameters: a target service instance, an authentication type between the compute service and the target service, and your application client type. Developers can use the Azure CLI or the guided Azure portal experience to create connections. @@ -42,6 +42,7 @@ Once a service connection is created, developers can validate and check the heal * Azure App Service * Azure Spring Cloud +* Azure Container Apps **Target Services:** @@ -62,7 +63,7 @@ Once a service connection is created, developers can validate and check the heal There are two major ways to use Service Connector for your Azure application: * **Azure CLI:** Create, list, validate and delete service-to-service connections with connection commands in the Azure CLI. -* **Azure Portal:** Use the guided portal experience to create service-to-service connections and manage connections with a hierarchy list. +* **Azure portal:** Use the guided portal experience to create service-to-service connections and manage connections with a hierarchy list. ## Next steps diff --git a/articles/service-connector/quickstart-cli-container-apps.md b/articles/service-connector/quickstart-cli-container-apps.md new file mode 100644 index 000000000000..f6a59de8769b --- /dev/null +++ b/articles/service-connector/quickstart-cli-container-apps.md @@ -0,0 +1,100 @@ +--- +title: Quickstart - Create a service connection in Container Apps using the Azure CLI +description: Quickstart showing how to create a service connection in Azure Container Apps using the Azure CLI +author: maud-lv +ms.author: malev +ms.service: service-connector +ms.topic: quickstart +ms.date: 05/24/2022 +ms.devlang: azurecli +--- + +# Quickstart: Create a service connection in Container Apps with the Azure CLI + +This quickstart shows you how to create a service connection in Container Apps with the Azure CLI. The [Azure CLI](/cli/azure) is a set of commands used to create and manage Azure resources. The Azure CLI is available across Azure services and is designed to get you working quickly with Azure, with an emphasis on automation. + +[!INCLUDE [quickstarts-free-trial-note](../../includes/quickstarts-free-trial-note.md)] + +[!INCLUDE [azure-cli-prepare-your-environment.md](../../includes/azure-cli-prepare-your-environment.md)] + +- Version 2.37.0 or higher of the Azure CLI. If using Azure Cloud Shell, the latest version is already installed. + +- An Azure account with an active subscription. [Create an account for free](https://azure.microsoft.com/free/dotnet). + +- An application deployed to Container Apps in a [region supported by Service Connector](./concept-region-support.md). If you don't have one yet, [create and deploy a container to Container Apps](../container-apps/quickstart-portal.md). + +> [!IMPORTANT] +> Service Connector in Container Apps is currently in preview. +> See the [Supplemental Terms of Use for Microsoft Azure Previews](https://azure.microsoft.com/support/legal/preview-supplemental-terms/) for legal terms that apply to Azure features that are in beta, preview, or otherwise not yet released into general availability. + +## View supported target services + +Use the following Azure CLI command to create and manage service connections from Container Apps. 
+ +```azurecli-interactive +az provider register -n Microsoft.ServiceLinker +az containerapp connection list-support-types --output table +``` + +## Create a service connection + +### [Using an access key](#tab/using-access-key) + +1. Use the following Azure CLI command to create a service connection from Container Apps to a Blob Storage with an access key. + + ```azurecli-interactive + az containerapp connection create storage-blob --secret + ``` + +1. Provide the following information at the Azure CLI's request: + + - **The resource group which contains the container app**: the name of the resource group with the container app. + - **Name of the container app**: the name of your container app. + - **The container where the connection information will be saved:** the name of the container, in your container app, that connects to the target service + - **The resource group which contains the storage account:** the name of the resource group name with the storage account. In this guide, we're using a Blob Storage. + - **Name of the storage account:** the name of the storage account that contains your blob. + +> [!NOTE] +> If you don't have a Blob Storage, you can run `az containerapp connection create storage-blob --new --secret` to provision a new Blob Storage and directly get connected to your app service. + +### [Using a managed identity](#tab/using-managed-identity) + +> [!IMPORTANT] +> Using a managed identity requires you have the permission to [Azure AD role assignment](../active-directory/managed-identities-azure-resources/howto-assign-access-portal.md). Without this permission, your connection creation will fail. Ask your subscription owner to grant you this permission, or use an access key instead to create the connection. + +1. Use the following Azure CLI command to create a service connection from Container Apps to a Blob Storage with a system-assigned managed identity. + + ```azurecli-interactive + az containerapp connection create storage-blob --system-identity + ``` + +1. Provide the following information at the Azure CLI's request: + + - **The resource group which contains the container app**: the name of the resource group with the container app. + - **Name of the container app**: the name of your container app. + - **The container where the connection information will be saved:** the name of the container, in your container app, that connects to the target service + - **The resource group which contains the storage account:** the name of the resource group name with the storage account. In this guide, we're using a Blob Storage. + - **Name of the storage account:** the name of the storage account that contains your blob. + +> [!NOTE] +> If you don't have a Blob Storage, you can run `az containerapp connection create storage-blob --new --system-identity` to provision a new Blob Storage and directly get connected to your app service. + +--- + +## View connections + + Use the Azure CLI command `az containerapp connection list` to list all your container app's provisioned connections. Provide the following information: + +- **Source compute service resource group name:** the resource group name of the container app. +- **Container app name:** the name of your container app. + +```azurecli-interactive +az containerapp connection list -g "" --name "" --output table +``` + +The output also displays the provisioning state of your connections: failed or succeeded. 
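After the connection is created, the code running in your container consumes it through the environment variables that Service Connector injects. As a minimal sketch, assuming the default variable name `AZURE_STORAGEBLOB_CONNECTIONSTRING` that a secret-based Blob Storage connection typically produces (check your connection's configuration for the exact name), a Python app in the container could read it like this:

```python
import os

from azure.storage.blob import BlobServiceClient

# Assumed default variable name for a secret-based Blob Storage connection;
# confirm the exact name on the connection's configuration page.
connection_string = os.environ["AZURE_STORAGEBLOB_CONNECTIONSTRING"]

service = BlobServiceClient.from_connection_string(connection_string)
for container in service.list_containers():
    print(container.name)
```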
+ +## Next steps + +> [!div class="nextstepaction"] +> [Service Connector internals](./concept-service-connector-internals.md) diff --git a/articles/service-connector/quickstart-cli-spring-cloud-connection.md b/articles/service-connector/quickstart-cli-spring-cloud-connection.md index 7e36a04ff5c1..58c5ca1f98d7 100644 --- a/articles/service-connector/quickstart-cli-spring-cloud-connection.md +++ b/articles/service-connector/quickstart-cli-spring-cloud-connection.md @@ -5,7 +5,7 @@ author: shizn ms.author: xshi ms.service: service-connector ms.topic: quickstart -ms.date: 05/03/2022 +ms.date: 03/24/2022 ms.devlang: azurecli ms.custom: event-tier1-build-2022 --- @@ -22,10 +22,9 @@ The [Azure CLI](/cli/azure) is a set of commands used to create and manage Azure - At least one Spring Cloud application running on Azure. If you don't have a Spring Cloud application, [create one](../spring-cloud/quickstart.md). - ## View supported target service types -Use the Azure CLI [[az spring-cloud connection](quickstart-cli-spring-cloud-connection.md)] command to create and manage service connections to your Spring Cloud application. +Use the Azure CLI [[az spring-cloud connection](quickstart-cli-spring-cloud-connection.md)] command to create and manage service connections to your Spring Cloud application. ```azurecli-interactive az provider register -n Microsoft.ServiceLinker @@ -34,9 +33,9 @@ az spring-cloud connection list-support-types --output table ## Create a service connection -#### [Using an access key](#tab/Using-access-key) +### [Using an access key](#tab/Using-access-key) -Use the Azure CLI [az spring-cloud connection]() command to create a service connection to an Azure Blob Storage with an access key, providing the following information: +Use the Azure CLI command `az spring-cloud connection` to create a service connection to an Azure Blob Storage with an access key, providing the following information: - **Spring Cloud resource group name:** the resource group name of the Spring Cloud. - **Spring Cloud name:** the name of your Spring Cloud. @@ -51,12 +50,12 @@ az spring-cloud connection create storage-blob --secret > [!NOTE] > If you don't have a Blob Storage, you can run `az spring-cloud connection create storage-blob --new --secret` to provision a new one and directly get connected to your app service. -#### [Using Managed Identity](#tab/Using-Managed-Identity) +### [Using Managed Identity](#tab/Using-Managed-Identity) > [!IMPORTANT] > To use Managed Identity, you must have permission to manage [role assignments in Azure Active Directory](../active-directory/managed-identities-azure-resources/howto-assign-access-portal.md). If you don't have this permission, your connection creation will fail. You can ask your subscription owner to grant you a role assignment permission or use an access key to create the connection. -Use the Azure CLI [az spring-cloud connection](quickstart-cli-spring-cloud-connection.md) command to create a service connection to a Blob Storage with System-assigned Managed Identity, providing the following information: +Use the Azure CLI command `az spring-cloud connection` to create a service connection to a Blob Storage with System-assigned Managed Identity, providing the following information: - **Spring Cloud resource group name:** the resource group name of the Spring Cloud. - **Spring Cloud name:** the name of your Spring Cloud. 
@@ -86,5 +85,5 @@ az spring-cloud connection list -g --spring-c Follow the tutorials listed below to start building your own application with Service Connector. > [!div class="nextstepaction"] -> - [Tutorial: Spring Cloud + MySQL](./tutorial-java-spring-mysql.md) -> - [Tutorial: Spring Cloud + Apache Kafka on Confluent Cloud](./tutorial-java-spring-confluent-kafka.md) +> [Tutorial: Spring Cloud + MySQL](./tutorial-java-spring-mysql.md) +> [Tutorial: Spring Cloud + Apache Kafka on Confluent Cloud](./tutorial-java-spring-confluent-kafka.md) diff --git a/articles/service-connector/quickstart-portal-spring-cloud-connection.md b/articles/service-connector/quickstart-portal-spring-cloud-connection.md index b9d22d63b72a..c63e49ba7741 100644 --- a/articles/service-connector/quickstart-portal-spring-cloud-connection.md +++ b/articles/service-connector/quickstart-portal-spring-cloud-connection.md @@ -1,12 +1,15 @@ --- -title: Quickstart - Create a service connection in Spring Cloud from the Azure portal -description: Quickstart showing how to create a service connection in Spring Cloud from Azure portal +title: Create a service connection in Spring Cloud from Azure portal +description: This quickstart shows you how to create a service connection in Spring Cloud from the Azure portal. author: shizn ms.author: xshi ms.service: service-connector -ms.custom: event-tier1-build-2022 -ms.topic: overview -ms.date: 05/03/2022 +ms.topic: quickstart +ms.date: 5/25/2022 +ms.custom: +- ignite-fall-2021 +- kr2b-contr-experiment +- event-tier1-build-2022 --- # Quickstart: Create a service connection in Spring Cloud from the Azure portal @@ -37,7 +40,7 @@ Sign in to the Azure portal at [https://portal.azure.com/](https://portal.azure. | **Storage account** | Your storage account | The target storage account you want to connect to. If you choose a different service type, select the corresponding target service instance. | 1. Select **Next: Authentication** to select the authentication type. Then select **Connection string** to use access key to connect your Blob storage account. -1. Then select **Next: Review + Create** to review the provided information. Then select **Create** to create the service connection. It might take 1 minute to complete the operation. +1. Then select **Next: Review + Create** to review the provided information. Then select **Create** to create the service connection. It might take one minute to complete the operation. ## View service connections in Spring Cloud @@ -45,7 +48,7 @@ Sign in to the Azure portal at [https://portal.azure.com/](https://portal.azure. 1. Select **>** to expand the list and access the properties required by your Spring boot application. -1. Select the ellipsis **...** and **Validate**. You can see the connection validation details in the pop-up blade from the right. +1. Select the ellipsis **...** and **Validate**. You can see the connection validation details in the context pane from the right. 
## Next steps diff --git a/articles/service-connector/toc.yml b/articles/service-connector/toc.yml index 19c0fd4f35a6..78c27cb5b059 100644 --- a/articles/service-connector/toc.yml +++ b/articles/service-connector/toc.yml @@ -9,19 +9,25 @@ items: - name: Service Connector in Azure App Service expanded: true items: - - name: Portal + - name: Azure portal href: quickstart-portal-app-service-connection.md - - name: CLI + - name: Azure CLI href: quickstart-cli-app-service-connection.md - name: Service Connector in Azure Spring Cloud expanded: true items: - - name: Portal + - name: Azure portal href: quickstart-portal-spring-cloud-connection.md - - name: CLI + - name: Azure CLI href: quickstart-cli-spring-cloud-connection.md - name: Service Connector in Container Apps - href: quickstart-portal-container-apps.md + expanded: true + items: + - name: Azure portal + href: quickstart-portal-container-apps.md + - name: Azure CLI + href: quickstart-cli-container-apps.md + - name: Tutorials expanded: true items: diff --git a/articles/service-connector/tutorial-portal-key-vault.md b/articles/service-connector/tutorial-portal-key-vault.md index f5b1f72db5d5..278c38383254 100644 --- a/articles/service-connector/tutorial-portal-key-vault.md +++ b/articles/service-connector/tutorial-portal-key-vault.md @@ -117,7 +117,7 @@ Now you can create a service connection to another target service and directly s 1. Select **Secrets** in the Key Vault left ToC, and select the blob storage secret name. > [!TIP] - > Don't have permission to list secrets? Refer to [troubleshooting](/azure/key-vault/general/troubleshooting-access-issues#i-am-not-able-to-list-or-get-secretskeyscertificate-i-am-seeing-something-went-wrong-error). + > Don't have permission to list secrets? Refer to [troubleshooting](../key-vault/general/troubleshooting-access-issues.md#i-am-not-able-to-list-or-get-secretskeyscertificate-i-am-seeing-something-went-wrong-error). 4. Select a version ID from the Current Version list. @@ -130,4 +130,4 @@ When no longer needed, delete the resource group and all related resources creat ## Next steps > [!div class="nextstepaction"] -> [Service Connector internals](./concept-service-connector-internals.md) +> [Service Connector internals](./concept-service-connector-internals.md) \ No newline at end of file diff --git a/articles/service-fabric/how-to-deploy-service-fabric-application-system-assigned-managed-identity.md b/articles/service-fabric/how-to-deploy-service-fabric-application-system-assigned-managed-identity.md index 8e84931d769d..d8f74e7cea2a 100644 --- a/articles/service-fabric/how-to-deploy-service-fabric-application-system-assigned-managed-identity.md +++ b/articles/service-fabric/how-to-deploy-service-fabric-application-system-assigned-managed-identity.md @@ -3,11 +3,14 @@ title: Deploy a Service Fabric app with system-assigned MI description: This article shows you how to assign a system-assigned managed identity to an Azure Service Fabric application ms.topic: article -ms.date: 07/25/2019 +ms.date: 05/25/2022 --- # Deploy Service Fabric application with system-assigned managed identity +> [!NOTE] +> Enabling identity for an existing app which was initially deployed using Azure cmdlets is not supported. + In order to access the managed identity feature for Azure Service Fabric applications, you must first enable the Managed Identity Token Service on the cluster. 
This service is responsible for the authentication of Service Fabric applications using their managed identities, and for obtaining access tokens on their behalf. Once the service is enabled, you can see it in Service Fabric Explorer under the **System** section in the left pane, running under the name **fabric:/System/ManagedIdentityTokenService** next to other system services. > [!NOTE] diff --git a/articles/service-fabric/how-to-patch-cluster-nodes-windows.md b/articles/service-fabric/how-to-patch-cluster-nodes-windows.md index 8b2dd36b932e..0ec7f741f598 100644 --- a/articles/service-fabric/how-to-patch-cluster-nodes-windows.md +++ b/articles/service-fabric/how-to-patch-cluster-nodes-windows.md @@ -31,7 +31,7 @@ When enabling automatic OS updates, you'll also need to disable Windows Update i > Service Fabric does not support in-VM upgrades where Windows Updates applies operating system patches without replacing the OS disk. > [!NOTE] -> When managed disks are used ensure that Custom Extension script for mapping managed disks to drive letters handles reimage of the VM correctly. See [Create a Service Fabric cluster with attached data disks](/azure/Virtual-machine-scale-sets/virtual-machine-scale-sets-attached-disks#create-a-service-fabric-cluster-with-attached-data-disks) for an example script that handles reimage of VMs with managed disks correctly. +> When managed disks are used ensure that Custom Extension script for mapping managed disks to drive letters handles reimage of the VM correctly. See [Create a Service Fabric cluster with attached data disks](../virtual-machine-scale-sets/virtual-machine-scale-sets-attached-disks.md#create-a-service-fabric-cluster-with-attached-data-disks) for an example script that handles reimage of VMs with managed disks correctly. 1. Enable automatic OS image upgrades and disable Windows Updates in the deployment template: diff --git a/articles/service-fabric/service-fabric-application-lifecycle.md b/articles/service-fabric/service-fabric-application-lifecycle.md index 86c97a15e8d4..79f84a010229 100644 --- a/articles/service-fabric/service-fabric-application-lifecycle.md +++ b/articles/service-fabric/service-fabric-application-lifecycle.md @@ -1,9 +1,12 @@ --- title: Application lifecycle in Service Fabric description: Describes developing, deploying, testing, upgrading, maintaining, and removing Service Fabric applications. - +service: service-fabric +ms.service: service-fabric +author: tomvcassidy +ms.author: tomcassidy ms.topic: conceptual -ms.date: 1/19/2018 +ms.date: 05/25/2022 --- # Service Fabric application lifecycle As with other platforms, an application on Azure Service Fabric usually goes through the following phases: design, development, testing, deployment, upgrading, maintenance, and removal. Service Fabric provides first-class support for the full application lifecycle of cloud applications, from development through deployment, daily management, and maintenance to eventual decommissioning. The service model enables several different roles to participate independently in the application lifecycle. This article provides an overview of the APIs and how they are used by the different roles throughout the phases of the Service Fabric application lifecycle. @@ -75,6 +78,53 @@ See the [Application upgrade tutorial](service-fabric-application-upgrade-tutori See [Deploy an application](service-fabric-deploy-remove-applications.md) for examples. 
+## Preserving disk space in cluster image store + +The ImageStoreService keeps copied and provisioned packages, which can lead to accumulation of files. File accumulation can cause the ImageStoreService (fabric:/System/ImageStoreService) to fill up the disk and can increase the build time for ImageStoreService replicas. + +To avoid file accumulation, use the following provisioning sequence: + +1. Copy the package to the image store, and use the compress option + +1. Provision the package + +1. Remove the package from the image store + +1. Upgrade the application/cluster + +1. Unprovision the old version + +Steps 3 and 5 in the procedure above prevent the accumulation of files in the image store. + +### Configuration for automatic cleanup + +You can automate step 3 above using PowerShell or XML. This causes the application package to be automatically deleted after the application type is successfully registered. + +[PowerShell](/powershell/module/servicefabric/register-servicefabricapplicationtype?view=azureservicefabricps&preserve-view=true): + +```powershell +Register-ServiceFabricApplicationType -ApplicationPackageCleanupPolicy Automatic +``` + +XML: + +```xml +<Section Name="Management"> +  <Parameter Name="CleanupApplicationPackageOnProvisionSuccess" Value="true" /> +</Section>
    +``` + +You can automate step 5 above using XML. This causes unused application types to be automatically unregistered. + +```xml +<Section Name="Management"> +  <Parameter Name="CleanupUnusedApplicationTypes" Value="true" /> +  <Parameter Name="PeriodicCleanupUnusedApplicationTypes" Value="true" /> +  <Parameter Name="TriggerAppTypeCleanupOnProvisionSuccess" Value="true" /> +  <Parameter Name="MaxUnusedAppTypeVersionsToKeep" Value="3" />
    +</Section>
    +``` + ## Cleaning up files and data on nodes The replication of application files will distribute eventually the files to all nodes depending on balancing actions. This can create disk pressure depending on the number of applications and their file size. diff --git a/articles/service-fabric/service-fabric-cluster-capacity.md b/articles/service-fabric/service-fabric-cluster-capacity.md index f968422b79b2..4a862e15201d 100644 --- a/articles/service-fabric/service-fabric-cluster-capacity.md +++ b/articles/service-fabric/service-fabric-cluster-capacity.md @@ -157,7 +157,7 @@ The capacity needs of your cluster will be determined by your specific workload - Partial / single core VM sizes like Standard A0 are not supported. - *A-series* VM sizes are not supported for performance reasons. - Low-priority VMs are not supported. -- [B-Series Burstable SKU's](https://docs.microsoft.com/azure/virtual-machines/sizes-b-series-burstable) are not supported. +- [B-Series Burstable SKU's](../virtual-machines/sizes-b-series-burstable.md) are not supported. #### Primary node type @@ -191,4 +191,4 @@ For more on cluster planning, see: * [Disaster recovery planning](service-fabric-disaster-recovery.md) -[SystemServices]: ./media/service-fabric-cluster-capacity/SystemServices.png +[SystemServices]: ./media/service-fabric-cluster-capacity/SystemServices.png \ No newline at end of file diff --git a/articles/service-fabric/service-fabric-cluster-fabric-settings.md b/articles/service-fabric/service-fabric-cluster-fabric-settings.md index 4cbb391025ff..ed4b40a6b918 100644 --- a/articles/service-fabric/service-fabric-cluster-fabric-settings.md +++ b/articles/service-fabric/service-fabric-cluster-fabric-settings.md @@ -60,8 +60,8 @@ The following is a list of Fabric settings that you can customize, organized by | **Parameter** | **Allowed Values** | **Upgrade Policy** | **Guidance or Short Description** | | --- | --- | --- | --- | |DeployedState |wstring, default is L"Disabled" |Static |2-stage removal of CSS. | -|EnableSecretMonitoring|bool, default is FALSE |Static |Must be enabled to use Managed KeyVaultReferences. Default may become true in the future. For more information, see [KeyVaultReference support for Azure-deployed Service Fabric Applications](https://docs.microsoft.com/azure/service-fabric/service-fabric-keyvault-references)| -|SecretMonitoringInterval|TimeSpan, default is Common::TimeSpan::FromMinutes(15) |Static |The rate at which Service Fabric will poll Key Vault for changes when using Managed KeyVaultReferences. This rate is a best effort, and changes in Key Vault may be reflected in the cluster earlier or later than the interval. For more information, see [KeyVaultReference support for Azure-deployed Service Fabric Applications](https://docs.microsoft.com/azure/service-fabric/service-fabric-keyvault-references) | +|EnableSecretMonitoring|bool, default is FALSE |Static |Must be enabled to use Managed KeyVaultReferences. Default may become true in the future. For more information, see [KeyVaultReference support for Azure-deployed Service Fabric Applications](./service-fabric-keyvault-references.md)| +|SecretMonitoringInterval|TimeSpan, default is Common::TimeSpan::FromMinutes(15) |Static |The rate at which Service Fabric will poll Key Vault for changes when using Managed KeyVaultReferences. This rate is a best effort, and changes in Key Vault may be reflected in the cluster earlier or later than the interval. 
For more information, see [KeyVaultReference support for Azure-deployed Service Fabric Applications](./service-fabric-keyvault-references.md) | |UpdateEncryptionCertificateTimeout |TimeSpan, default is Common::TimeSpan::MaxValue |Static |Specify timespan in seconds. The default has changed to TimeSpan::MaxValue; but overrides are still respected. May be deprecated in the future. | @@ -659,7 +659,7 @@ The following is a list of Fabric settings that you can customize, organized by |ReplicationBatchSize|uint, default is 1|Static|Specifies the number of operations to be sent between primary and secondary replicas. If zero the primary sends one record per operation to the secondary. Otherwise the primary replica aggregates log records until the config value is reached. This will reduce network traffic.| ## Replication - **Warning Note** : Changing Replication/TranscationalReplicator settings at cluster level changes settings for all stateful services include system services. This is generally not recommended. See this document [Configure Azure Service Fabric Reliable Services - Azure Service Fabric | Microsoft Docs](https://docs.microsoft.com/azure/service-fabric/service-fabric-reliable-services-configuration) to configure services at app level. + **Warning Note** : Changing Replication/TranscationalReplicator settings at cluster level changes settings for all stateful services include system services. This is generally not recommended. See this document [Configure Azure Service Fabric Reliable Services - Azure Service Fabric | Microsoft Docs](./service-fabric-reliable-services-configuration.md) to configure services at app level. | **Parameter** | **Allowed Values** | **Upgrade Policy**| **Guidance or Short Description** | @@ -932,7 +932,7 @@ The following is a list of Fabric settings that you can customize, organized by |Level |Int, default is 4 | Dynamic |Trace etw level can take values 1, 2, 3, 4. To be supported you must keep the trace level at 4 | ## TransactionalReplicator - **Warning Note** : Changing Replication/TranscationalReplicator settings at cluster level changes settings for all stateful services include system services. This is generally not recommended. See this document [Configure Azure Service Fabric Reliable Services - Azure Service Fabric | Microsoft Docs](https://docs.microsoft.com/azure/service-fabric/service-fabric-reliable-services-configuration) to configure services at app level. + **Warning Note** : Changing Replication/TranscationalReplicator settings at cluster level changes settings for all stateful services include system services. This is generally not recommended. See this document [Configure Azure Service Fabric Reliable Services - Azure Service Fabric | Microsoft Docs](./service-fabric-reliable-services-configuration.md) to configure services at app level. | **Parameter** | **Allowed Values** | **Upgrade Policy** | **Guidance or Short Description** | | --- | --- | --- | --- | @@ -996,4 +996,4 @@ The following is a list of Fabric settings that you can customize, organized by |PropertyGroup| UserServiceMetricCapacitiesMap, default is None | Static | A collection of user services resource governance limits Needs to be static as it affects AutoDetection logic | ## Next steps -For more information, see [Upgrade the configuration of an Azure cluster](service-fabric-cluster-config-upgrade-azure.md) and [Upgrade the configuration of a standalone cluster](service-fabric-cluster-config-upgrade-windows-server.md). 
+For more information, see [Upgrade the configuration of an Azure cluster](service-fabric-cluster-config-upgrade-azure.md) and [Upgrade the configuration of a standalone cluster](service-fabric-cluster-config-upgrade-windows-server.md). \ No newline at end of file diff --git a/articles/service-fabric/service-fabric-cluster-resource-manager-movement-cost.md b/articles/service-fabric/service-fabric-cluster-resource-manager-movement-cost.md index b913791a76b8..6d1d768de51b 100644 --- a/articles/service-fabric/service-fabric-cluster-resource-manager-movement-cost.md +++ b/articles/service-fabric/service-fabric-cluster-resource-manager-movement-cost.md @@ -54,7 +54,7 @@ await fabricClient.ServiceManager.UpdateServiceAsync(new Uri("fabric:/AppName/Se ## Dynamically specifying move cost on a per-replica basis -The preceding snippets are all for specifying MoveCost for a whole service at once from outside the service itself. However, move cost is most useful when the move cost of a specific service object changes over its lifespan. Since the services themselves probably have the best idea of how costly they are to move a given time, there's an API for services to report their own individual move cost during runtime. +The preceding snippets are all for specifying MoveCost for a whole service at once from outside the service itself. However, move cost is most useful when the move cost of a specific service object changes over its lifespan. Since the services themselves probably have the best idea of how costly they are to move a given time, there's an API for services to report their own individual move cost during runtime. C#: @@ -62,6 +62,9 @@ C#: this.Partition.ReportMoveCost(MoveCost.Medium); ``` +> [!NOTE] +> You can only set the movement cost for secondary replicas through code. + ## Reporting move cost for a partition The previous section describes how service replicas or instances report MoveCost themselves. We provided Service Fabric API for reporting MoveCost values on behalf of other partitions. Sometimes service replica or instance can't determine the best MoveCost value by itself, and must rely on other services logic. Reporting MoveCost on behalf of other partitions, alongside [reporting load on behalf of other partitions](service-fabric-cluster-resource-manager-metrics.md#reporting-load-for-a-partition), allows you to completely manage partitions from outside. These APIs eliminate needs for [the Sidecar pattern](/azure/architecture/patterns/sidecar), from the perspective of the Cluster Resource Manager. diff --git a/articles/service-fabric/service-fabric-connect-to-secure-cluster.md b/articles/service-fabric/service-fabric-connect-to-secure-cluster.md index 46c97583551c..707bc8995c39 100644 --- a/articles/service-fabric/service-fabric-connect-to-secure-cluster.md +++ b/articles/service-fabric/service-fabric-connect-to-secure-cluster.md @@ -235,7 +235,7 @@ catch (Exception e) The following example relies on Microsoft.IdentityModel.Clients.ActiveDirectory, Version: 2.19.208020213. > [!IMPORTANT] -> The [Microsoft.IdentityModel.Clients.ActiveDirectory](https://www.nuget.org/packages/Microsoft.IdentityModel.Clients.ActiveDirectory) NuGet package and Azure AD Authentication Library (ADAL) have been deprecated. No new features have been added since June 30, 2020. We strongly encourage you to upgrade, see the [migration guide](/azure/active-directory/develop/msal-migration) for more details. 
+> The [Microsoft.IdentityModel.Clients.ActiveDirectory](https://www.nuget.org/packages/Microsoft.IdentityModel.Clients.ActiveDirectory) NuGet package and Azure AD Authentication Library (ADAL) have been deprecated. No new features have been added since June 30, 2020. We strongly encourage you to upgrade, see the [migration guide](../active-directory/develop/msal-migration.md) for more details. For more information on AAD token acquisition, see [Microsoft.Identity.Client](/dotnet/api/microsoft.identity.client?view=azure-dotnet). @@ -390,4 +390,4 @@ At least two certificates should be used for securing the cluster, one for the c * [Managing your Service Fabric applications in Visual Studio](service-fabric-manage-application-in-visual-studio.md) * [Service Fabric Health model introduction](service-fabric-health-introduction.md) * [Application Security and RunAs](service-fabric-application-runas-security.md) -* [Getting started with Service Fabric CLI](service-fabric-cli.md) +* [Getting started with Service Fabric CLI](service-fabric-cli.md) \ No newline at end of file diff --git a/articles/service-fabric/service-fabric-local-linux-cluster-windows-wsl2.md b/articles/service-fabric/service-fabric-local-linux-cluster-windows-wsl2.md index c3bc29805772..88e7319e9393 100644 --- a/articles/service-fabric/service-fabric-local-linux-cluster-windows-wsl2.md +++ b/articles/service-fabric/service-fabric-local-linux-cluster-windows-wsl2.md @@ -24,7 +24,7 @@ Before you get started, you need: * Set up Ubuntu 18.04 Linux Distribution from Microsoft Store while setting up WSL2 >[!TIP] -> To install WSL2 on your Windows machine, follow the steps in the [WSL documentation](https://docs.microsoft.com/windows/wsl/install). After installing, please ensure installation of Ubuntu-18.04, make it your default distribution and it should be up and running. +> To install WSL2 on your Windows machine, follow the steps in the [WSL documentation](/windows/wsl/install). After installing, please ensure installation of Ubuntu-18.04, make it your default distribution and it should be up and running. > ## Set up Service Fabric SDK inside Linux Distribution diff --git a/articles/service-fabric/service-fabric-reliable-services-exception-serialization.md b/articles/service-fabric/service-fabric-reliable-services-exception-serialization.md index 0509719dfd35..ba6a670a36b5 100644 --- a/articles/service-fabric/service-fabric-reliable-services-exception-serialization.md +++ b/articles/service-fabric/service-fabric-reliable-services-exception-serialization.md @@ -6,8 +6,8 @@ ms.date: 03/30/2022 ms.custom: devx-track-csharp --- # Remoting Exception Serialization Overview -BinaryFormatter based serialization is not secure and Microsoft strongly recommends not to use BinaryFormatter for data processing. More details on the security implications can be found [here](https://docs.microsoft.com/dotnet/standard/serialization/binaryformatter-security-guide). -Service Fabric had been using BinaryFormatter for serializing Exceptions. Starting ServiceFabric v9.0, [Data Contract based serialization](https://docs.microsoft.com/dotnet/api/system.runtime.serialization.datacontractserializer?view=net-6.0) for remoting exceptions is made available as an opt-in feature. It is strongly recommended to opt for DataContract remoting exception serialization by following the below mentioned steps. +BinaryFormatter based serialization is not secure and Microsoft strongly recommends not to use BinaryFormatter for data processing. 
More details on the security implications can be found [here](/dotnet/standard/serialization/binaryformatter-security-guide). +Service Fabric had been using BinaryFormatter for serializing Exceptions. Starting ServiceFabric v9.0, [Data Contract based serialization](/dotnet/api/system.runtime.serialization.datacontractserializer?view=net-6.0) for remoting exceptions is made available as an opt-in feature. It is strongly recommended to opt for DataContract remoting exception serialization by following the below mentioned steps. Support for BinaryFormatter based remoting exception serialization will be deprecated in the future. @@ -389,4 +389,4 @@ Existing services must follow the below order(*Service first*) to upgrade. Failu * [Web API with OWIN in Reliable Services](./service-fabric-reliable-services-communication-aspnetcore.md) * [Windows Communication Foundation communication with Reliable Services](service-fabric-reliable-services-communication-wcf.md) -* [Secure communication for Reliable Services](service-fabric-reliable-services-secure-communication.md) +* [Secure communication for Reliable Services](service-fabric-reliable-services-secure-communication.md) \ No newline at end of file diff --git a/articles/service-fabric/service-fabric-tutorial-dotnet-app-enable-https-endpoint.md b/articles/service-fabric/service-fabric-tutorial-dotnet-app-enable-https-endpoint.md index 5e1bb751ebcd..740721affe86 100644 --- a/articles/service-fabric/service-fabric-tutorial-dotnet-app-enable-https-endpoint.md +++ b/articles/service-fabric/service-fabric-tutorial-dotnet-app-enable-https-endpoint.md @@ -265,6 +265,13 @@ if ($cert -eq $null) $keyName=$cert.PrivateKey.CspKeyContainerInfo.UniqueKeyContainerName $keyPath = "C:\ProgramData\Microsoft\Crypto\RSA\MachineKeys\" + + if ($keyName -eq $null){ + $privateKey = [System.Security.Cryptography.X509Certificates.RSACertificateExtensions]::GetRSAPrivateKey($cert) + $keyName = $privateKey.Key.UniqueName + $keyPath = "C:\ProgramData\Microsoft\Crypto\Keys" + } + $fullPath=$keyPath+$keyName $acl=(Get-Item $fullPath).GetAccessControl('Access') diff --git a/articles/service-health/azure-status-overview.md b/articles/service-health/azure-status-overview.md index 8cbd20ce6e1f..75938187d601 100644 --- a/articles/service-health/azure-status-overview.md +++ b/articles/service-health/azure-status-overview.md @@ -2,7 +2,7 @@ title: Azure status overview | Microsoft Docs description: A global view into the health of Azure services ms.topic: overview -ms.date: 06/11/2019 +ms.date: 05/26/2022 --- # Azure status overview @@ -17,14 +17,27 @@ The Azure status page gets updated in real time as the health of Azure services ![Azure status refresh](./media/azure-status-overview/update.PNG) -## Azure status history - -While the Azure status page always shows the latest health information, you can view older events using the [Azure status history page](https://status.azure.com/status/history/). The history page contains all RCAs for incidents that occurred on November 20th, 2019 or later and will - from that date forward - provide a 5-year RCA history. RCAs prior to November 20th, 2019 are not available. - ## RSS Feed Azure status also provides [an RSS feed](https://status.azure.com/status/feed/) of changes to the health of Azure services that you can subscribe to. +## When does Azure publish communications to the Status page? + +Most of our service issue communications are provided as targeted notifications sent directly to impacted customers & partners. 
These are delivered through [Azure Service Health](https://azure.microsoft.com/features/service-health/) in the Azure portal and trigger any [Azure Service Health alerts](/azure/service-health/alerts-activity-log-service-notifications-portal?toc=%2Fazure%2Fservice-health%2Ftoc.json) that have been configured. The public Status page is only used to communicate about service issues under three specific scenarios: + +- **Scenario 1 - Broad impact involving multiple regions, zones, or services** - A service issue has broad/significant customer impact across multiple services for a full region or multiple regions. We notify you in this case because customer-configured resilience like high availability and/or disaster recovery may not be sufficient to avoid impact. +- **Scenario 2 - Azure portal / Service Health not accessible** - A service issue impedes you from accessing the Azure portal or Azure Service Health and thus impacted our standard outage communications path described earlier. +- **Scenario 3 - Service Impact, but not sure who exactly is affected yet** - The service issue has broad/significant customer impact but we aren't yet able to confirm which customers, regions, or services are affected. In this case, we aren't able to send targeted communications, so we provide public updates. + +## When does Azure publish RCAs to the Status History page? + +While the [Azure status page](https://status.azure.com/status) always shows the latest health information, you can view older events using the [Azure status history page](https://status.azure.com/status/history/). The history page contains all RCAs (Root Cause Analyses) for incidents that occurred on November 20, 2019 or later and will - from that date forward - provide a 5-year RCA history. RCAs prior to November 20, 2019 aren't available. + +After June 1st 2022, the [Azure status history page](https://status.azure.com/status/history/) will only be used to provide RCAs for scenario 1 above. We're committed to publishing RCAs publicly for service issues that had the broadest impact, such as those with both a multi-service and multi-region impact. We publish to ensure that all customers and the industry at large can learn from our retrospectives on these issues, and understand what steps we're taking to make such issues less likely and/or less impactful in future. + +For scenarios 2 and 3 above - We may communicate publicly on the Status page during impact to work around when our standard, targeted communications aren't able to reach all impacted customers. After the issue is mitigated, we'll conduct a thorough impact analysis to determine exactly which customer subscriptions were impacted. In such scenarios, we'll provide the relevant PIR only to affected customers via [Azure Service Health](https://azure.microsoft.com/features/service-health/) in the Azure portal. + + ## Next Steps * Learn how you can get a more personalized view into Azure health with [Service Health](./service-health-overview.md). diff --git a/articles/site-recovery/site-recovery-faq.yml b/articles/site-recovery/site-recovery-faq.yml index 36f0708c064d..966721db13d1 100644 --- a/articles/site-recovery/site-recovery-faq.yml +++ b/articles/site-recovery/site-recovery-faq.yml @@ -216,12 +216,14 @@ sections: - question: | Can I replicate over a site-to-site VPN to Azure? answer: | - Azure Site Recovery replicates data to an Azure storage account or managed disks, over a public endpoint. Replication isn't over a site-to-site VPN. 
- - - question: | - Why can't I replicate over VPN? - answer: | - When you replicate to Azure, replication traffic reaches the public endpoints of an Azure Storage. Thus you can only replicate over the public internet or via ExpressRoute (Microsoft peering or an existing public peering). + Azure Site Recovery replicates data to an Azure storage account or managed disks, over a public endpoint. However, replication can be performed over Site-to-Site VPN as well. Site-to-Site VPN connectivity allows organizations to connect existing networks to Azure, or Azure networks to each other. Site-to-Site VPN occurs over IPSec tunneling over the internet, leveraging existing on-premises edge network equipment and network appliances in Azure, either native features like Azure Virtual Private Network (VPN) Gateway or 3rd party options such as Check Point CloudGaurd, Palo Alto NextGen Firewall. + + - Private connectivity over the public Internet to Microsoft Edge + - Recovery Service Vaults configured for security with Private Endpoints + - Replication over customer private Virtual Network connection + - Easy transition to "Future State" + - No SLA and potentially higher latency + - Requires an on-premises VPN device availability - question: | Can I use Riverbed SteelHeads for replication? diff --git a/articles/spatial-anchors/tutorials/tutorial-share-anchors-across-devices.md b/articles/spatial-anchors/tutorials/tutorial-share-anchors-across-devices.md index b9a143b339ef..e049a2ef6d73 100644 --- a/articles/spatial-anchors/tutorials/tutorial-share-anchors-across-devices.md +++ b/articles/spatial-anchors/tutorials/tutorial-share-anchors-across-devices.md @@ -49,7 +49,7 @@ Follow the instructions [here](../how-tos/setup-unity-project.md#download-asa-pa ## Deploy the Sharing Anchors service > [!NOTE] -> In this tutorial we will be using the free tier of the Azure App Service. The free tier will time out after [20 min](https://docs.microsoft.com/azure/architecture/framework/services/compute/azure-app-service/reliability#configuration-recommendations) of inactivity and reset the memory cache. +> In this tutorial we will be using the free tier of the Azure App Service. The free tier will time out after [20 min](/azure/architecture/framework/services/compute/azure-app-service/reliability#configuration-recommendations) of inactivity and reset the memory cache. ## [Visual Studio](#tab/VS) diff --git a/articles/spring-cloud/breaking-changes.md b/articles/spring-cloud/breaking-changes.md new file mode 100644 index 000000000000..604ddbaf4a86 --- /dev/null +++ b/articles/spring-cloud/breaking-changes.md @@ -0,0 +1,73 @@ +--- +title: Azure Spring Apps API breaking changes +description: Describes the breaking changes introduced by the latest Azure Spring Apps stable API version. +author: karlerickson +ms.author: yuwzho +ms.service: spring-cloud +ms.topic: how-to +ms.date: 05/25/2022 +ms.custom: devx-track-java +--- + +# Azure Spring Apps API breaking changes + +> [!NOTE] +> Azure Spring Apps is the new name for the Azure Spring Cloud service. Although the service has a new name, you'll see the old name in some places for a while as we work to update assets such as screenshots, videos, and diagrams. + +**This article applies to:** ✔️ Basic/Standard tier ✔️ Enterprise tier + +This article describes breaking changes introduced into the Azure Spring Apps API. + +The Azure Spring Apps service releases the new stable API version 2022-04-01. 
The new API version introduces breaking changes based on the previous stable API version 2020-07-01. We suggest that you update your API calls to the new API version. + +## Previous API deprecation date + +The previous API version 2020-07-01 will not be supported starting in April 2025. + +## API breaking changes from 2020-07-01 to 2022-04-01 + +### Deprecate number value CPU and MemoryInGB in Deployments + +Deprecate the fields `properties.deploymentSettings.cpu` and `properties.deploymentSettings.memoryInGB` in the `Spring/Apps/Deployments` resource. Use `properties.deploymentSettings.resourceRequests.cpu` and `properties.deploymentSettings.resourceRequests.memory` instead. + +### RBAC role change for blue-green deployment + +Deprecate the field `properties.activeDeploymentName` in the `Spring/Apps` resource. Use `POST/SUBSCRIPTIONS/RESOURCEGROUPS/PROVIDERS/MICROSOFT.APPPLATFORM/SPRING/APPS/SETACTIVEDEPLOYMENTS` for blue-green deployment. Performing this action requires the separate RBAC role `spring/apps/setActiveDeployments/action`. + +### Move options from different property bags for the Spring/Apps/Deployments resource + +- Deprecate `properties.createdTime`. Use `systemData.createdAt`. +- Deprecate `properties.deploymentSettings.jvmOptions`. Use `properties.source.jvmOptions`. +- Deprecate `properties.deploymentSettings.runtimeVersion`. Use `properties.source.runtimeVersion`. +- Deprecate `properties.deploymentSettings.netCoreMainEntryPath`. Use `properties.source.netCoreMainEntryPath`. +- Deprecate `properties.appName`, which you can extract from `id`. + +## Updates in the Azure CLI extension + +### Add new RBAC role for blue-green deployment + +You need to add the RBAC role `spring/apps/setActiveDeployments/action` to perform the following Azure CLI commands: + +```azurecli +az spring app set-deployment \ +    --resource-group <resource-group-name> \ +    --service <service-instance-name> \ +    --name <app-name> \ +    --deployment <deployment-name> +az spring app unset-deployment \ +    --resource-group <resource-group-name> \ +    --service <service-instance-name> \ +    --name <app-name> +``` + +### Output updates + +If you're using the Azure CLI `spring-cloud` extension with a version lower than 3.0.0, and you want to upgrade the extension version or migrate to the `spring` extension, then note the following output updates. + +- `az spring app` command output: Remove `properties.activeDeploymentName`. Use `properties.activeDeployment.name` instead. +- `az spring app` command output: Remove `properties.createdTime`. Use `systemData.createdAt` instead. +- `az spring app` command output: Remove `properties.activeDeployment.properties.deploymentSettings.cpu`. Use `properties.activeDeployment.properties.deploymentSettings.resourceRequests.cpu` instead. +- `az spring app` command output: Remove `properties.activeDeployment.properties.deploymentSettings.memoryInGB`. Use `properties.activeDeployment.properties.deploymentSettings.resourceRequests.memory` instead. +- `az spring app` command output: Remove `properties.activeDeployment.properties.deploymentSettings.jvmOptions`. Use `properties.activeDeployment.properties.source.jvmOptions` instead. +- `az spring app` command output: Remove `properties.activeDeployment.properties.deploymentSettings.runtimeVersion`. Use `properties.activeDeployment.properties.source.runtimeVersion` instead. +- `az spring app` command output: Remove `properties.activeDeployment.properties.deploymentSettings.netCoreMainEntryPath`. Use `properties.activeDeployment.properties.source.netCoreMainEntryPath` instead.
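When updating scripts for these output changes, it can help to query the new property path directly. A minimal sketch, assuming placeholder names (`my-rg`, `my-service`, `my-app`); the `--query` path is the new location called out in the list above.

```azurecli
# Read the active deployment's JVM options from the new property path
# (properties.activeDeployment.properties.source.jvmOptions) instead of the
# removed deploymentSettings path.
az spring app show \
    --resource-group my-rg \
    --service my-service \
    --name my-app \
    --query "properties.activeDeployment.properties.source.jvmOptions"
```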
diff --git a/articles/spring-cloud/how-to-deploy-with-custom-container-image.md b/articles/spring-cloud/how-to-deploy-with-custom-container-image.md index 4630cb68b1e9..9f0c176eca57 100644 --- a/articles/spring-cloud/how-to-deploy-with-custom-container-image.md +++ b/articles/spring-cloud/how-to-deploy-with-custom-container-image.md @@ -21,7 +21,7 @@ This article explains how to deploy Spring Boot applications in Azure Spring App ## Prerequisites * A container image containing the application. -* The image is pushed to an image registry. For more information, see [Azure Container Registry](/azure/container-instances/container-instances-tutorial-prepare-acr). +* The image is pushed to an image registry. For more information, see [Azure Container Registry](../container-instances/container-instances-tutorial-prepare-acr.md). > [!NOTE] > The web application must listen on port `1025` for Standard tier and on port `8080` for Enterprise tier. The way to change the port depends on the framework of the application. For example, specify `SERVER_PORT=1025` for Spring Boot applications or `ASPNETCORE_URLS=http://+:1025/` for ASP.Net Core applications. The probe can be disabled for applications that do not listen on any port. @@ -173,7 +173,7 @@ When your application is restarted or scaled out, the latest image will always b ### Avoid not being able to connect to the container registry in a VNet -If you deployed the instance to a VNet, make sure you allow the network traffic to your container registry in the NSG or Azure Firewall (if used). For more information, see [Customer responsibilities for running in VNet](/azure/spring-cloud/vnet-customer-responsibilities) to add the needed security rules. +If you deployed the instance to a VNet, make sure you allow the network traffic to your container registry in the NSG or Azure Firewall (if used). For more information, see [Customer responsibilities for running in VNet](./vnet-customer-responsibilities.md) to add the needed security rules. ### Install an APM into the image manually @@ -266,4 +266,4 @@ az spring app deployment create \ ## Next steps -* [How to capture dumps](/azure/spring-cloud/how-to-capture-dumps) +* [How to capture dumps](./how-to-capture-dumps.md) \ No newline at end of file diff --git a/articles/spring-cloud/quickstart-deploy-apps.md b/articles/spring-cloud/quickstart-deploy-apps.md index b59daab33037..73d43888a82a 100644 --- a/articles/spring-cloud/quickstart-deploy-apps.md +++ b/articles/spring-cloud/quickstart-deploy-apps.md @@ -209,7 +209,7 @@ Compiling the project takes 5-10 minutes. Once completed, you should have indivi 1. If you didn't run the following commands in the previous quickstarts, set the CLI defaults. ```azurecli - az configure --defaults group=<resource-group-name> spring-cloud=<service-instance-name> + az configure --defaults group=<resource-group-name> spring=<service-instance-name> ``` 1. Create the 2 core Spring applications for PetClinic: API gateway and customers-service. 
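The step above corresponds to `az spring app create` calls along the lines of the following sketch. The instance count and memory values are illustrative, and the resource group and service come from the `az configure --defaults` command shown earlier.

```azurecli
# Create the API gateway app and expose it with a public endpoint.
az spring app create --name api-gateway --instance-count 1 --memory 2Gi --assign-endpoint

# Create the customers-service app (internal only, no public endpoint).
az spring app create --name customers-service --instance-count 1 --memory 2Gi
```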
diff --git a/articles/spring-cloud/toc.yml b/articles/spring-cloud/toc.yml index 9acef5a90e2e..65ec91ef074a 100644 --- a/articles/spring-cloud/toc.yml +++ b/articles/spring-cloud/toc.yml @@ -262,6 +262,8 @@ items: href: reference-architecture.md - name: Reference items: + - name: API breaking changes + href: breaking-changes.md - name: Azure CLI plugin href: /cli/azure/spring?toc=/azure/spring-cloud/toc.json&bc=/azure/bread/toc.json - name: Azure PowerShell diff --git a/articles/static-web-apps/quotas.md b/articles/static-web-apps/quotas.md index 474f58485aa0..44fbb39dad2a 100644 --- a/articles/static-web-apps/quotas.md +++ b/articles/static-web-apps/quotas.md @@ -22,6 +22,7 @@ The following quotas exist for Azure Static Web Apps. | Plan size | 500 MB max app size for a single deployment, and 0.50 GB max for all staging and production environments | 500 MB max app size for a single deployment, and 2.00 GB max combined across all staging and production environments | | Pre-production environments | 3 | 10 | | Custom domains | 2 per app | 5 per app | +| Allowed IP ranges | Unavailable | 25 | | Authorization (built-in roles) | Unlimited end-users that may authenticate with built-in `authenticated` role | Unlimited end-users that may authenticate with built-in `authenticated` role | | Authorization (custom roles) | Maximum of 25 end-users that may belong to custom roles via [invitations](authentication-authorization.md?tabs=invitations#role-management) | Maximum of 25 end-users that may belong to custom roles via [invitations](authentication-authorization.md?tabs=invitations#role-management), or unlimited end-users that may be assigned custom roles via [serverless function](authentication-authorization.md?tabs=function#role-management) | diff --git a/articles/storage/blobs/TOC.yml b/articles/storage/blobs/TOC.yml index b68be7d019a6..95e63576410c 100644 --- a/articles/storage/blobs/TOC.yml +++ b/articles/storage/blobs/TOC.yml @@ -7,6 +7,8 @@ items: href: storage-blobs-overview.md - name: Compare core storage services href: ../common/storage-introduction.md?toc=%2fazure%2fstorage%2fblobs%2ftoc.json + - name: Blob Storage feature support + href: storage-feature-support-in-storage-accounts.md expanded: true - name: Blob storage items: @@ -305,8 +307,6 @@ items: href: storage-blob-static-website.md - name: Upgrading to Data Lake Storage Gen2 href: upgrade-to-data-lake-storage-gen2.md - - name: Blob Storage feature support - href: storage-feature-support-in-storage-accounts.md - name: How to items: - name: Configure Blob Storage @@ -810,6 +810,8 @@ items: items: - name: Introduction to Data Lake Storage href: data-lake-storage-introduction.md + - name: Blob Storage feature support + href: storage-feature-support-in-storage-accounts.md - name: Tutorials items: - name: Use with Synapse SQL diff --git a/articles/storage/blobs/access-tiers-overview.md b/articles/storage/blobs/access-tiers-overview.md index 49986e4148bc..599960b8c128 100644 --- a/articles/storage/blobs/access-tiers-overview.md +++ b/articles/storage/blobs/access-tiers-overview.md @@ -1,11 +1,11 @@ --- title: Hot, Cool, and Archive access tiers for blob data titleSuffix: Azure Storage -description: Azure storage offers different access tiers so that you can store your blob data in the most cost-effective manner based on how it is being used. Learn about the Hot, Cool, and Archive access tiers for Blob Storage. 
+description: Azure storage offers different access tiers so that you can store your blob data in the most cost-effective manner based on how it's being used. Learn about the Hot, Cool, and Archive access tiers for Blob Storage. author: tamram ms.author: tamram -ms.date: 02/28/2022 +ms.date: 05/18/2022 ms.service: storage ms.subservice: blobs ms.topic: conceptual @@ -14,7 +14,9 @@ ms.reviewer: fryu # Hot, Cool, and Archive access tiers for blob data -Data stored in the cloud grows at an exponential pace. To manage costs for your expanding storage needs, it can be helpful to organize your data based on how frequently it will be accessed and how long it will be retained. Azure storage offers different access tiers so that you can store your blob data in the most cost-effective manner based on how it is being used. Azure Storage access tiers include: +Data stored in the cloud grows at an exponential pace. To manage costs for your expanding storage needs, it can be helpful to organize your data based on how frequently it will be accessed and how long it will be retained. Azure storage offers different access tiers so that you can store your blob data in the most cost-effective manner based on how it's being used. Azure Storage access tiers include: - **Hot tier** - An online tier optimized for storing data that is accessed or modified frequently. The Hot tier has the highest storage costs, but the lowest access costs. - **Cool tier** - An online tier optimized for storing data that is infrequently accessed or modified. Data in the Cool tier should be stored for a minimum of 30 days. The Cool tier has lower storage costs and higher access costs compared to the Hot tier. @@ -34,14 +36,14 @@ Example usage scenarios for the Hot tier include: Usage scenarios for the Cool access tier include: - Short-term data backup and disaster recovery. -- Older data sets that are not used frequently, but are expected to be available for immediate access. +- Older data sets that aren't used frequently, but are expected to be available for immediate access. - Large data sets that need to be stored in a cost-effective way while additional data is being gathered for processing. To learn how to move a blob to the Hot or Cool tier, see [Set a blob's access tier](access-tiers-online-manage.md). Data in the Cool tier has slightly lower availability, but offers the same high durability, retrieval latency, and throughput characteristics as the Hot tier. For data in the Cool tier, slightly lower availability and higher access costs may be acceptable trade-offs for lower overall storage costs, as compared to the Hot tier. For more information, see [SLA for storage](https://azure.microsoft.com/support/legal/sla/storage/v1_5/). -A blob in the Cool tier in a general-purpose v2 accounts is subject to an early deletion penalty if it is deleted or moved to a different tier before 30 days has elapsed. This charge is prorated. For example, if a blob is moved to the Cool tier and then deleted after 21 days, you'll be charged an early deletion fee equivalent to 9 (30 minus 21) days of storing that blob in the Cool tier. +A blob in the Cool tier in a general-purpose v2 account is subject to an early deletion penalty if it's deleted or moved to a different tier before 30 days has elapsed. This charge is prorated. 
For example, if a blob is moved to the Cool tier and then deleted after 21 days, you'll be charged an early deletion fee equivalent to 9 (30 minus 21) days of storing that blob in the Cool tier. The Hot and Cool tiers support all redundancy configurations. For more information about data redundancy options in Azure Storage, see [Azure Storage redundancy](../common/storage-redundancy.md). @@ -59,7 +61,7 @@ Data must remain in the Archive tier for at least 180 days or be subject to an e While a blob is in the Archive tier, it can't be read or modified. To read or download a blob in the Archive tier, you must first rehydrate it to an online tier, either Hot or Cool. Data in the Archive tier can take up to 15 hours to rehydrate, depending on the priority you specify for the rehydration operation. For more information about blob rehydration, see [Overview of blob rehydration from the Archive tier](archive-rehydrate-overview.md). -An archived blob's metadata remains available for read access, so that you can list the blob and its properties, metadata, and index tags. Metadata for a blob in the Archive tier is read-only, while blob index tags can be read or written. Snapshots are not supported for archived blobs. +An archived blob's metadata remains available for read access, so that you can list the blob and its properties, metadata, and index tags. Metadata for a blob in the Archive tier is read-only, while blob index tags can be read or written. Snapshots aren't supported for archived blobs. The following operations are supported for blobs in the Archive tier: @@ -73,7 +75,7 @@ The following operations are supported for blobs in the Archive tier: - [Set Blob Tags](/rest/api/storageservices/set-blob-tags) - [Set Blob Tier](/rest/api/storageservices/set-blob-tier) -Only storage accounts that are configured for LRS, GRS, or RA-GRS support moving blobs to the Archive tier. The Archive tier is not supported for ZRS, GZRS, or RA-GZRS accounts. For more information about redundancy configurations for Azure Storage, see [Azure Storage redundancy](../common/storage-redundancy.md). +Only storage accounts that are configured for LRS, GRS, or RA-GRS support moving blobs to the Archive tier. The Archive tier isn't supported for ZRS, GZRS, or RA-GZRS accounts. For more information about redundancy configurations for Azure Storage, see [Azure Storage redundancy](../common/storage-redundancy.md). To change the redundancy configuration for a storage account that contains blobs in the Archive tier, you must first rehydrate all archived blobs to the Hot or Cool tier. Microsoft recommends that you avoid changing the redundancy configuration for a storage account that contains archived blobs if at all possible, because rehydration operations can be costly and time-consuming. @@ -83,13 +85,13 @@ Migrating a storage account from LRS to GRS is supported as long as no blobs wer Storage accounts have a default access tier setting that indicates the online tier in which a new blob is created. The default access tier setting can be set to either Hot or Cool. Users can override the default setting for an individual blob when uploading the blob or changing its tier. -The default access tier for a new general-purpose v2 storage account is set to the Hot tier by default. You can change the default access tier setting when you create a storage account or after it is created. 
If you do not change this setting on the storage account or explicitly set the tier when uploading a blob, then a new blob is uploaded to the Hot tier by default. +The default access tier for a new general-purpose v2 storage account is set to the Hot tier by default. You can change the default access tier setting when you create a storage account or after it's created. If you don't change this setting on the storage account or explicitly set the tier when uploading a blob, then a new blob is uploaded to the Hot tier by default. A blob that doesn't have an explicitly assigned tier infers its tier from the default account access tier setting. If a blob's access tier is inferred from the default account access tier setting, then the Azure portal displays the access tier as **Hot (inferred)** or **Cool (inferred)**. -Changing the default access tier setting for a storage account applies to all blobs in the account for which an access tier has not been explicitly set. If you toggle the default access tier setting from Hot to Cool in a general-purpose v2 account, then you are charged for write operations (per 10,000) for all blobs for which the access tier is inferred. You are charged for both read operations (per 10,000) and data retrieval (per GB) if you toggle from Cool to Hot in a general-purpose v2 account. +Changing the default access tier setting for a storage account applies to all blobs in the account for which an access tier hasn't been explicitly set. If you toggle the default access tier setting from Hot to Cool in a general-purpose v2 account, then you're charged for write operations (per 10,000) for all blobs for which the access tier is inferred. You're charged for both read operations (per 10,000) and data retrieval (per GB) if you toggle from Cool to Hot in a general-purpose v2 account. -When you create a legacy Blob Storage account, you must specify the default access tier setting as Hot or Cool at create time. There's no charge for changing the default account access tier setting from Hot to Cool in a legacy Blob Storage account. You are charged for both read operations (per 10,000) and data retrieval (per GB) if you toggle from Cool to Hot in a Blob Storage account. Microsoft recommends using general-purpose v2 storage accounts rather than Blob Storage accounts when possible. +When you create a legacy Blob Storage account, you must specify the default access tier setting as Hot or Cool at create time. There's no charge for changing the default account access tier setting from Hot to Cool in a legacy Blob Storage account. You're charged for both read operations (per 10,000) and data retrieval (per GB) if you toggle from Cool to Hot in a Blob Storage account. Microsoft recommends using general-purpose v2 storage accounts rather than Blob Storage accounts when possible. > [!NOTE] > The Archive tier is not supported as the default access tier for a storage account. @@ -100,14 +102,14 @@ To explicitly set a blob's tier when you create it, specify the tier when you up After a blob is created, you can change its tier in either of the following ways: -- By calling the [Set Blob Tier](/rest/api/storageservices/set-blob-tier) operation, either directly or via a [lifecycle management](#blob-lifecycle-management) policy. Calling [Set Blob Tier](/rest/api/storageservices/set-blob-tier) is typically the best option when you are changing a blob's tier from a hotter tier to a cooler one. 
-- By calling the [Copy Blob](/rest/api/storageservices/copy-blob) operation to copy a blob from one tier to another. Calling [Copy Blob](/rest/api/storageservices/copy-blob) is recommended for most scenarios where you are rehydrating a blob from the Archive tier to an online tier, or moving a blob from Cool to Hot. By copying a blob, you can avoid the early deletion penalty, if the required storage interval for the source blob has not yet elapsed. However, copying a blob results in capacity charges for two blobs, the source blob and the destination blob. +- By calling the [Set Blob Tier](/rest/api/storageservices/set-blob-tier) operation, either directly or via a [lifecycle management](#blob-lifecycle-management) policy. Calling [Set Blob Tier](/rest/api/storageservices/set-blob-tier) is typically the best option when you're changing a blob's tier from a hotter tier to a cooler one. +- By calling the [Copy Blob](/rest/api/storageservices/copy-blob) operation to copy a blob from one tier to another. Calling [Copy Blob](/rest/api/storageservices/copy-blob) is recommended for most scenarios where you're rehydrating a blob from the Archive tier to an online tier, or moving a blob from Cool to Hot. By copying a blob, you can avoid the early deletion penalty, if the required storage interval for the source blob hasn't yet elapsed. However, copying a blob results in capacity charges for two blobs, the source blob and the destination blob. Changing a blob's tier from Hot to Cool or Archive is instantaneous, as is changing from Cool to Hot. Rehydrating a blob from the Archive tier to either the Hot or Cool tier can take up to 15 hours. Keep in mind the following points when moving a blob between the Cool and Archive tiers: -- If a blob's tier is inferred as Cool based on the storage account's default access tier and the blob is moved to the Archive tier, there is no early deletion charge. +- If a blob's tier is inferred as Cool based on the storage account's default access tier and the blob is moved to the Archive tier, there's no early deletion charge. - If a blob is explicitly moved to the Cool tier and then moved to the Archive tier, the early deletion charge applies. The following table summarizes the approaches you can take to move blobs between various tiers. @@ -138,7 +140,7 @@ The following table summarizes the features of the Hot, Cool, and Archive access | **Latency**
    **(Time to first byte)** | Milliseconds | Milliseconds | Hours2 | | **Supported redundancy configurations** | All | All | LRS, GRS, and RA-GRS3 only | -1 Objects in the Cool tier on general-purpose v2 accounts have a minimum retention duration of 30 days. For Blob Storage accounts, there is no minimum retention duration for the Cool tier. +1 Objects in the Cool tier on general-purpose v2 accounts have a minimum retention duration of 30 days. For Blob Storage accounts, there's no minimum retention duration for the Cool tier. 2 When rehydrating a blob from the Archive tier, you can choose either a standard or high rehydration priority option. Each offers different retrieval latencies and costs. For more information, see [Overview of blob rehydration from the Archive tier](archive-rehydrate-overview.md). @@ -178,7 +180,7 @@ Changing the account access tier results in tier change charges for all blobs th Keep in mind the following billing impacts when changing a blob's tier: -- When a blob is uploaded or moved between tiers, it is charged at the corresponding rate immediately upon upload or tier change. +- When a blob is uploaded or moved between tiers, it's charged at the corresponding rate immediately upon upload or tier change. - When a blob is moved to a cooler tier (Hot to Cool, Hot to Archive, or Cool to Archive), the operation is billed as a write operation to the destination tier, where the write operation (per 10,000) and data write (per GB) charges of the destination tier apply. - When a blob is moved to a warmer tier (Archive to Cool, Archive to Hot, or Cool to Hot), the operation is billed as a read from the source tier, where the read operation (per 10,000) and data retrieval (per GB) charges of the source tier apply. Early deletion charges for any blob moved out of the Cool or Archive tier may apply as well. - While a blob is being rehydrated from the Archive tier, that blob's data is billed as archived data until the data is restored and the blob's tier changes to Hot or Cool. 
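To make the billing notes above concrete, the tier moves they describe can be performed with the Azure CLI. A minimal sketch, assuming placeholder names (`mystorageaccount`, `mycontainer`, `myblob.txt`) and Azure AD authentication.

```azurecli
# Move a blob to the Cool tier; this is billed as a write operation against the destination tier.
az storage blob set-tier \
    --account-name mystorageaccount \
    --container-name mycontainer \
    --name myblob.txt \
    --tier Cool \
    --auth-mode login

# Rehydrate an archived blob back to an online tier; the blob is billed as archived data
# until rehydration completes, which can take up to 15 hours at standard priority.
az storage blob set-tier \
    --account-name mystorageaccount \
    --container-name mycontainer \
    --name myblob.txt \
    --tier Hot \
    --rehydrate-priority Standard \
    --auth-mode login
```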
@@ -197,8 +199,8 @@ This table shows how this feature is supported in your account and the impact on | Storage account type | Blob Storage (default support) | Data Lake Storage Gen2 1 | NFS 3.0 1 | SFTP 1 | |--|--|--|--|--| -| [Standard general-purpose v2](https://docs.microsoft.com/azure/storage/common/storage-account-overview?toc=%2Fazure%2Fstorage%2Fblobs%2Ftoc.json#types-of-storage-accounts) | ![Yes](../media/icons/yes-icon.png) | ![Yes](../media/icons/yes-icon.png) | ![Yes](../media/icons/yes-icon.png) | ![Yes](../media/icons/yes-icon.png) | -| [Premium block blobs](https://docs.microsoft.com/azure/storage/common/storage-account-overview?toc=%2Fazure%2Fstorage%2Fblobs%2Ftoc.json#types-of-storage-accounts) | ![No](../media/icons/no-icon.png) | ![No](../media/icons/no-icon.png) | ![No](../media/icons/no-icon.png) | ![No](../media/icons/no-icon.png) | +| [Standard general-purpose v2](../common/storage-account-overview.md#types-of-storage-accounts) | ![Yes](../media/icons/yes-icon.png) | ![Yes](../media/icons/yes-icon.png) | ![Yes](../media/icons/yes-icon.png) | ![Yes](../media/icons/yes-icon.png) | +| [Premium block blobs](../common/storage-account-overview.md#types-of-storage-accounts) | ![Yes](../media/icons/yes-icon.png) | ![Yes](../media/icons/yes-icon.png) | ![Yes](../media/icons/yes-icon.png) | ![Yes](../media/icons/yes-icon.png) | 1 Data Lake Storage Gen2, Network File System (NFS) 3.0 protocol, and SSH File Transfer Protocol (SFTP) support all require a storage account with a hierarchical namespace enabled. diff --git a/articles/storage/blobs/blob-upload-function-trigger-javascript.md b/articles/storage/blobs/blob-upload-function-trigger-javascript.md index b1fc61244e3d..9881ec72cef8 100644 --- a/articles/storage/blobs/blob-upload-function-trigger-javascript.md +++ b/articles/storage/blobs/blob-upload-function-trigger-javascript.md @@ -150,7 +150,7 @@ Copy the value of the `connectionString` property and paste it somewhere to use --- ## Create the Computer Vision service -Next, create the Computer Vision service account that will process our uploaded files. Computer Vision is part of Azure Cognitive Services and offers various features for extracting data out of images. You can learn more about Computer Vision on the [overview page](/azure/cognitive-services/computer-vision/overview). +Next, create the Computer Vision service account that will process our uploaded files. Computer Vision is part of Azure Cognitive Services and offers various features for extracting data out of images. You can learn more about Computer Vision on the [overview page](../../cognitive-services/computer-vision/overview.md). ### [Azure portal](#tab/computer-vision-azure-portal) @@ -347,4 +347,4 @@ If you're not going to continue to use this application, you can delete the reso 1. Select **Resource groups** from the Azure explorer 1. Find and right-click the `msdocs-storage-function` resource group from the list. -1. Select **Delete**. The process to delete the resource group may take a few minutes to complete. +1. Select **Delete**. The process to delete the resource group may take a few minutes to complete. 
\ No newline at end of file diff --git a/articles/storage/blobs/blob-upload-function-trigger.md b/articles/storage/blobs/blob-upload-function-trigger.md index 0b53d06205e8..6e4ae8e5c1a0 100644 --- a/articles/storage/blobs/blob-upload-function-trigger.md +++ b/articles/storage/blobs/blob-upload-function-trigger.md @@ -106,7 +106,7 @@ Copy the value of the `connectionString` property and paste it somewhere to use --- ## Create the Computer Vision service -Next, create the Computer Vision service account that will process our uploaded files. Computer Vision is part of Azure Cognitive Services and offers a variety of features for extracting data out of images. You can learn more about Computer Vision on the [overview page](/azure/cognitive-services/computer-vision/overview). +Next, create the Computer Vision service account that will process our uploaded files. Computer Vision is part of Azure Cognitive Services and offers a variety of features for extracting data out of images. You can learn more about Computer Vision on the [overview page](../../cognitive-services/computer-vision/overview.md). ### [Azure portal](#tab/azure-portal) @@ -374,4 +374,4 @@ If you're not going to continue to use this application, you can delete the reso 2) Select the **Delete resource group** button at the top of the resource group overview page. 3) Enter the resource group name *msdocs-storage-function* in the confirmation dialog. 4) Select delete. -The process to delete the resource group may take a few minutes to complete. +The process to delete the resource group may take a few minutes to complete. \ No newline at end of file diff --git a/articles/storage/blobs/data-lake-storage-acl-powershell.md b/articles/storage/blobs/data-lake-storage-acl-powershell.md index 750c7ab93a6d..919c77291274 100644 --- a/articles/storage/blobs/data-lake-storage-acl-powershell.md +++ b/articles/storage/blobs/data-lake-storage-acl-powershell.md @@ -221,8 +221,6 @@ To see an example that sets ACLs recursively in batches by specifying a batch si When you *update* an ACL, you modify the ACL instead of replacing the ACL. For example, you can add a new security principal to the ACL without affecting other security principals listed in the ACL. To replace the ACL instead of update it, see the [Set ACLs](#set-acls) section of this article. -To update an ACL, create a new ACL object with the ACL entry that you want to update, and then use that object in update ACL operation. Do not get the existing ACL, just provide ACL entries to be updated. - This section shows you how to: - Update an ACL diff --git a/articles/storage/blobs/data-lake-storage-migrate-gen1-to-gen2-azure-portal.md b/articles/storage/blobs/data-lake-storage-migrate-gen1-to-gen2-azure-portal.md index 6547a5308a04..8bb01d994136 100644 --- a/articles/storage/blobs/data-lake-storage-migrate-gen1-to-gen2-azure-portal.md +++ b/articles/storage/blobs/data-lake-storage-migrate-gen1-to-gen2-azure-portal.md @@ -17,7 +17,7 @@ On **Feb. 29, 2024** Azure Data Lake Storage Gen1 will be retired. For more info This article shows you how to simplify the migration by using the Azure portal. You can provide your consent in the Azure portal and then migrate your data and metadata (such as timestamps and ACLs) automatically from Azure Data Lake Storage Gen1 to Azure Data Lake Storage Gen2. For easier reading, this article uses the term *Gen1* to refer to Azure Data Lake Storage Gen1, and the term *Gen2* to refer to Azure Data Lake Storage Gen2. 
> [!NOTE] -> Your account may not qualify for portal-based migration based on certain constraints. When the **Migrate data** button is not enabled in the Azure portal for your Gen1 account, if you have a support plan, you can [file a support request](https://portal.azure.com/#blade/Microsoft_Azure_Support/HelpAndSupportBlade/newsupportrequest). You can also get answers from community experts in [Microsoft Q&A](https://docs.microsoft.com/answers/topics/azure-data-lake-storage.html). +> Your account may not qualify for portal-based migration based on certain constraints. When the **Migrate data** button is not enabled in the Azure portal for your Gen1 account, if you have a support plan, you can [file a support request](https://portal.azure.com/#blade/Microsoft_Azure_Support/HelpAndSupportBlade/newsupportrequest). You can also get answers from community experts in [Microsoft Q&A](/answers/topics/azure-data-lake-storage.html). > [!WARNING] > Azure Data Lake Storage Gen2 doesn't support Azure Data Lake Analytics. If you're using Azure Data Lake Analytics, you'll need to migrate before proceeding. See [Migrate Azure Data Lake Analytics workloads](#migrate-azure-data-lake-analytics-workloads) for more information. @@ -66,9 +66,9 @@ For Gen1, ensure that the [Owner](../../role-based-access-control/built-in-roles ## Migrate Azure Data Lake Analytics workloads -Azure Data Lake Storage Gen2 doesn't support Azure Data Lake Analytics. Azure Data Lake Analytics [will be retired](https://azure.microsoft.com/updates/migrate-to-azure-synapse-analytics/) on February 29, 2024. If you attempt to use the Azure portal to migrate an Azure Data Lake Storage Gen1 account that is used for Azure Data Lake Analytics, it's possible that you'll break your Azure Data Lake Analytics workloads. You must first [migrate your Azure Data Lake Analytics workloads to Azure Synapse Analytics](/azure/data-lake-analytics/migrate-azure-data-lake-analytics-to-synapse) or another supported compute platform before attempting to migrate your Gen1 account. +Azure Data Lake Storage Gen2 doesn't support Azure Data Lake Analytics. Azure Data Lake Analytics [will be retired](https://azure.microsoft.com/updates/migrate-to-azure-synapse-analytics/) on February 29, 2024. If you attempt to use the Azure portal to migrate an Azure Data Lake Storage Gen1 account that is used for Azure Data Lake Analytics, it's possible that you'll break your Azure Data Lake Analytics workloads. You must first [migrate your Azure Data Lake Analytics workloads to Azure Synapse Analytics](../../data-lake-analytics/migrate-azure-data-lake-analytics-to-synapse.md) or another supported compute platform before attempting to migrate your Gen1 account. -For more information, see [Manage Azure Data Lake Analytics using the Azure portal](/azure/data-lake-analytics/data-lake-analytics-manage-use-portal). +For more information, see [Manage Azure Data Lake Analytics using the Azure portal](../../data-lake-analytics/data-lake-analytics-manage-use-portal.md). ## Perform the migration @@ -200,7 +200,7 @@ Post migration, if you chose the option that copies only data, then you will be #### While providing consent I encountered the error message *Migration initiation failed*. What should I do next? -Make sure all your Azure Data lake Analytics accounts are [migrated to Azure Synapse Analytics](/azure/data-lake-analytics/migrate-azure-data-lake-analytics-to-synapse) or another supported compute platform. Once Azure Data Lake Analytics accounts are migrated, retry the consent. 
If you see the issue further and you have a support plan, you can [file a support request](https://portal.azure.com/#blade/Microsoft_Azure_Support/HelpAndSupportBlade/newsupportrequest). You can also get answers from community experts in [Microsoft Q&A](https://docs.microsoft.com/answers/topics/azure-data-lake-storage.html). +Make sure all your Azure Data lake Analytics accounts are [migrated to Azure Synapse Analytics](../../data-lake-analytics/migrate-azure-data-lake-analytics-to-synapse.md) or another supported compute platform. Once Azure Data Lake Analytics accounts are migrated, retry the consent. If you see the issue further and you have a support plan, you can [file a support request](https://portal.azure.com/#blade/Microsoft_Azure_Support/HelpAndSupportBlade/newsupportrequest). You can also get answers from community experts in [Microsoft Q&A](/answers/topics/azure-data-lake-storage.html). #### After the migration completes, can I go back to using the Gen1 account? @@ -220,4 +220,4 @@ When you copy the data over to your Gen2-enabled account, two factors that can a ## Next steps -- Learn about migration in general. For more information, see [Migrate Azure Data Lake Storage from Gen1 to Gen2](data-lake-storage-migrate-gen1-to-gen2.md). +- Learn about migration in general. For more information, see [Migrate Azure Data Lake Storage from Gen1 to Gen2](data-lake-storage-migrate-gen1-to-gen2.md). \ No newline at end of file diff --git a/articles/storage/blobs/data-lake-storage-tutorial-extract-transform-load-hive.md b/articles/storage/blobs/data-lake-storage-tutorial-extract-transform-load-hive.md index 0480bb14ca24..65ec62151a58 100644 --- a/articles/storage/blobs/data-lake-storage-tutorial-extract-transform-load-hive.md +++ b/articles/storage/blobs/data-lake-storage-tutorial-extract-transform-load-hive.md @@ -40,27 +40,14 @@ If you don't have an Azure subscription, [create a free account](https://azure.m - **A Secure Shell (SSH) client**: For more information, see [Connect to HDInsight (Hadoop) by using SSH](../../hdinsight/hdinsight-hadoop-linux-use-ssh-unix.md). -## Download the flight data -1. Browse to [Research and Innovative Technology Administration, Bureau of Transportation Statistics](https://www.transtats.bts.gov/DL_SelectFields.asp?Table_ID=236&DB_Short_Name=On-Time). +## Download, extract and then upload the data -2. On the page, select the following values: +In this section, you'll download sample flight data. Then, you'll upload that data to your HDInsight cluster and then copy that data to your Data Lake Storage Gen2 account. - | Name | Value | - | --- | --- | - | Filter Year |2013 | - | Filter Period |January | - | Fields |Year, FlightDate, Reporting_Airline, IATA_CODE_Reporting_Airline, Flight_Number_Reporting_Airline, OriginAirportID, Origin, OriginCityName, OriginState, DestAirportID, Dest, DestCityName, DestState, DepDelayMinutes, ArrDelay, ArrDelayMinutes, CarrierDelay, WeatherDelay, NASDelay, SecurityDelay, LateAircraftDelay. | +1. Download the [On_Time_Reporting_Carrier_On_Time_Performance_1987_present_2016_1.zip](https://github.com/Azure-Samples/AzureStorageSnippets/blob/master/blobs/tutorials/On_Time_Reporting_Carrier_On_Time_Performance_1987_present_2016_1.zip) file. This file contains the flight data. - Clear all other fields. - -3. Select **Download**. You get a .zip file with the data fields you selected. 
- -## Extract and upload the data - -In this section, you'll upload data to your HDInsight cluster and then copy that data to your Data Lake Storage Gen2 account. - -1. Open a command prompt and use the following Secure Copy (Scp) command to upload the .zip file to the HDInsight cluster head node: +2. Open a command prompt and use the following Secure Copy (Scp) command to upload the .zip file to the HDInsight cluster head node: ```bash scp .zip @-ssh.azurehdinsight.net: @@ -74,13 +61,13 @@ In this section, you'll upload data to your HDInsight cluster and then copy that If you use a public key, you might need to use the `-i` parameter and specify the path to the matching private key. For example, `scp -i ~/.ssh/id_rsa .zip @-ssh.azurehdinsight.net:`. -2. After the upload has finished, connect to the cluster by using SSH. On the command prompt, enter the following command: +3. After the upload has finished, connect to the cluster by using SSH. On the command prompt, enter the following command: ```bash ssh @-ssh.azurehdinsight.net ``` -3. Use the following command to unzip the .zip file: +4. Use the following command to unzip the .zip file: ```bash unzip .zip @@ -88,7 +75,7 @@ In this section, you'll upload data to your HDInsight cluster and then copy that The command extracts a **.csv** file. -4. Use the following command to create the Data Lake Storage Gen2 container. +5. Use the following command to create the Data Lake Storage Gen2 container. ```bash hadoop fs -D "fs.azure.createRemoteFileSystemDuringInitialization=true" -ls abfs://@.dfs.core.windows.net/ @@ -98,13 +85,13 @@ In this section, you'll upload data to your HDInsight cluster and then copy that Replace the `` placeholder with the name of your storage account. -5. Use the following command to create a directory. +6. Use the following command to create a directory. ```bash hdfs dfs -mkdir -p abfs://@.dfs.core.windows.net/tutorials/flightdelays/data ``` -6. Use the following command to copy the *.csv* file to the directory: +7. Use the following command to copy the *.csv* file to the directory: ```bash hdfs dfs -put ".csv" abfs://@.dfs.core.windows.net/tutorials/flightdelays/data/ diff --git a/articles/storage/blobs/data-lake-storage-use-databricks-spark.md b/articles/storage/blobs/data-lake-storage-use-databricks-spark.md index 9561c151b8f5..fc12eb2da483 100644 --- a/articles/storage/blobs/data-lake-storage-use-databricks-spark.md +++ b/articles/storage/blobs/data-lake-storage-use-databricks-spark.md @@ -50,13 +50,9 @@ If you don't have an Azure subscription, create a [free account](https://azure.m This tutorial uses flight data from the Bureau of Transportation Statistics to demonstrate how to perform an ETL operation. You must download this data to complete the tutorial. -1. Go to [Research and Innovative Technology Administration, Bureau of Transportation Statistics](https://www.transtats.bts.gov/DL_SelectFields.asp?gnoyr_VQ=FGJ). +1. Download the [On_Time_Reporting_Carrier_On_Time_Performance_1987_present_2016_1.zip](https://github.com/Azure-Samples/AzureStorageSnippets/blob/master/blobs/tutorials/On_Time_Reporting_Carrier_On_Time_Performance_1987_present_2016_1.zip) file. This file contains the flight data. -2. Select the **Prezipped File** check box to select all data fields. - -3. Select the **Download** button and save the results to your computer. - -4. Unzip the contents of the zipped file and make a note of the file name and the path of the file. You need this information in a later step. +2. 
Unzip the contents of the zipped file and make a note of the file name and the path of the file. You need this information in a later step. ## Create an Azure Databricks service diff --git a/articles/storage/blobs/immutable-policy-configure-version-scope.md b/articles/storage/blobs/immutable-policy-configure-version-scope.md index c3c9413cb133..8e7a0818550b 100644 --- a/articles/storage/blobs/immutable-policy-configure-version-scope.md +++ b/articles/storage/blobs/immutable-policy-configure-version-scope.md @@ -7,14 +7,14 @@ author: tamram ms.service: storage ms.topic: how-to -ms.date: 12/01/2021 +ms.date: 05/17/2022 ms.author: tamram ms.subservice: blobs --- # Configure immutability policies for blob versions -Immutable storage for Azure Blob Storage enables users to store business-critical data in a WORM (Write Once, Read Many) state. While in a WORM state, data cannot be modified or deleted for a user-specified interval. By configuring immutability policies for blob data, you can protect your data from overwrites and deletes. Immutability policies include time-based retention policies and legal holds. For more information about immutability policies for Blob Storage, see [Store business-critical blob data with immutable storage](immutable-storage-overview.md). +Immutable storage for Azure Blob Storage enables users to store business-critical data in a WORM (Write Once, Read Many) state. While in a WORM state, data can't be modified or deleted for a user-specified interval. By configuring immutability policies for blob data, you can protect your data from overwrites and deletes. Immutability policies include time-based retention policies and legal holds. For more information about immutability policies for Blob Storage, see [Store business-critical blob data with immutable storage](immutable-storage-overview.md). An immutability policy may be scoped either to an individual blob version or to a container. This article describes how to configure a version-level immutability policy. To learn how to configure container-level immutability policies, see [Configure immutability policies for containers](immutable-policy-configure-container-scope.md). @@ -49,7 +49,7 @@ To enable support for version-level immutability when you create a storage accou :::image type="content" source="media/immutable-policy-configure-version-scope/create-account-version-level-immutability.png" alt-text="Screenshot showing how to create a storage account with version-level immutability support"::: -After the storage account is created, you can configure a default version-level policy for the account. For more details, see [Configure a default time-based retention policy](#configure-a-default-time-based-retention-policy). +After the storage account is created, you can configure a default version-level policy for the account. For more information, see [Configure a default time-based retention policy](#configure-a-default-time-based-retention-policy). ##### [PowerShell](#tab/azure-powershell) @@ -77,7 +77,7 @@ If version-level immutability support is enabled for the storage account and the Both new and existing containers can be configured to support version-level immutability. However, an existing container must undergo a migration process in order to enable support. -Keep in mind that enabling version-level immutability support for a container does not make data in that container immutable. 
You must also either configure a default immutability policy for the container, or an immutability policy on a specific blob version. If you enabled version-level immutability for the storage account when it was created, you can also configure a default immutability policy for the account. +Keep in mind that enabling version-level immutability support for a container doesn't make data in that container immutable. You must also either configure a default immutability policy for the container, or an immutability policy on a specific blob version. If you enabled version-level immutability for the storage account when it was created, you can also configure a default immutability policy for the account. #### Enable version-level immutability for a new container @@ -138,15 +138,15 @@ If version-level immutability support is enabled for a container and the contain #### Migrate an existing container to support version-level immutability -To configure version-level immutability policies for an existing container, you must migrate the container to support version-level immutable storage. Container migration may take some time and cannot be reversed. You can migrate ten containers at a time per storage account. +To configure version-level immutability policies for an existing container, you must migrate the container to support version-level immutable storage. Container migration may take some time and can't be reversed. You can migrate 10 containers at a time per storage account. To migrate an existing container to support version-level immutability policies, the container must have a container-level time-based retention policy configured. The migration fails unless the container has an existing policy. The retention interval for the container-level policy is maintained as the retention interval for the default version-level policy on the container. -If the container has an existing container-level legal hold, then it cannot be migrated until the legal hold is removed. +If the container has an existing container-level legal hold, then it can't be migrated until the legal hold is removed. ##### [Portal](#tab/azure-portal) -To migrate a container to support version-level immutable storage in the Azure portal, follow these steps: +To migrate a container to support version-level immutability policies in the Azure portal, follow these steps: 1. Navigate to the desired container. 1. Select the **More** button on the right, then select **Access policy**. @@ -157,7 +157,7 @@ To migrate a container to support version-level immutable storage in the Azure p :::image type="content" source="media/immutable-policy-configure-version-scope/migrate-existing-container.png" alt-text="Screenshot showing how to migrate an existing container to support version-level immutability"::: -While the migration operation is underway, the scope of the policy on the container shows as *Container*. +While the migration operation is underway, the scope of the policy on the container shows as *Container*. Any operations related to managing version-level immutability policies aren't permitted while the container migration is in progress. Other operations on blob data will proceed normally during migration. 
:::image type="content" source="media/immutable-policy-configure-version-scope/container-migration-in-process.png" alt-text="Screenshot showing container migration in process"::: @@ -192,7 +192,7 @@ To check the status of the long-running operation, read the operation's **JobSta $migrationOperation.JobStateInfo.State ``` -If the container does not have an existing time-based retention policy when you attempt to migrate to version-level immutability, then the operation fails. The following example checks the value of the **JobStateInfo.State** property and displays the error message if the operation failed because the container-level policy does not exist. +If the container doesn't have an existing time-based retention policy when you attempt to migrate to version-level immutability, then the operation fails. The following example checks the value of the **JobStateInfo.State** property and displays the error message if the operation failed because the container-level policy doesn't exist. ```azurepowershell if ($migrationOperation.JobStateInfo.State -eq "Failed") { @@ -248,7 +248,7 @@ az storage container-rm show \ After you have enabled version-level immutability support for a storage account or for an individual container, you can specify a default version-level time-based retention policy for the account or container. When you specify a default policy for an account or container, that policy applies by default to all new blob versions that are created in the account or container. You can override the default policy for any individual blob version in the account or container. -The default policy is not automatically applied to blob versions that existed before the default policy was configured. +The default policy isn't automatically applied to blob versions that existed before the default policy was configured. If you migrated an existing container to support version-level immutability, then the container-level policy that was in effect before the migration is migrated to a default version-level policy for the container. @@ -326,9 +326,9 @@ Time-based retention policies maintain blob data in a WORM state for a specified You have three options for configuring a time-based retention policy for a blob version: -- Option 1: You can configure a default policy on the storage account or container that applies to all objects in the account or container. Objects in the account or container will inherit the default policy unless you explicitly override it by configuring a policy on an individual blob version. For more details, see [Configure a default time-based retention policy](#configure-a-default-time-based-retention-policy). -- Option 2: You can configure a policy on the current version of the blob. This policy can override a default policy configured on the storage account or container, if a default policy exists and it is unlocked. By default, any previous versions that are created after the policy is configured will inherit the policy on the current version of the blob. For more details, see [Configure a retention policy on the current version of a blob](#configure-a-retention-policy-on-the-current-version-of-a-blob). -- Option 3: You can configure a policy on a previous version of a blob. This policy can override a default policy configured on the current version, if one exists and it is unlocked. For more details, see [Configure a retention policy on a previous version of a blob](#configure-a-retention-policy-on-a-previous-version-of-a-blob). 
+- Option 1: You can configure a default policy on the storage account or container that applies to all objects in the account or container. Objects in the account or container will inherit the default policy unless you explicitly override it by configuring a policy on an individual blob version. For more information, see [Configure a default time-based retention policy](#configure-a-default-time-based-retention-policy). +- Option 2: You can configure a policy on the current version of the blob. This policy can override a default policy configured on the storage account or container, if a default policy exists and it's unlocked. By default, any previous versions that are created after the policy is configured will inherit the policy on the current version of the blob. For more information, see [Configure a retention policy on the current version of a blob](#configure-a-retention-policy-on-the-current-version-of-a-blob). +- Option 3: You can configure a policy on a previous version of a blob. This policy can override a default policy configured on the current version, if one exists and it's unlocked. For more information, see [Configure a retention policy on a previous version of a blob](#configure-a-retention-policy-on-a-previous-version-of-a-blob). For more information on blob versioning, see [Blob versioning](versioning-overview.md). @@ -354,7 +354,7 @@ You can view the properties for a blob to see whether a policy is enabled on the ### Configure a retention policy on a previous version of a blob -You can also configure a time-based retention policy on a previous version of a blob. A previous version is always immutable in that it cannot be modified. However, a previous version can be deleted. A time-based retention policy protects against deletion while it is in effect. +You can also configure a time-based retention policy on a previous version of a blob. A previous version is always immutable in that it can't be modified. However, a previous version can be deleted. A time-based retention policy protects against deletion while it is in effect. To configure a time-based retention policy on a previous version of a blob, follow these steps: @@ -408,7 +408,7 @@ az storage blob immutability-policy set \ When you use the Azure portal to upload a blob to a container that supports version-level immutability, you have several options for configuring a time-based retention policy for the new blob: -- Option 1: If a default retention policy is configured for the container, you can upload the blob with the container's policy. This option is selected by default when there is a retention policy on the container. +- Option 1: If a default retention policy is configured for the container, you can upload the blob with the container's policy. This option is selected by default when there's a retention policy on the container. - Option 2: If a default retention policy is configured for the container, you can choose to override the default policy, either by defining a custom retention policy for the new blob, or by uploading the blob with no policy. - Option 3: If no default policy is configured for the container, then you can upload the blob with a custom policy, or with no policy. @@ -416,13 +416,13 @@ To configure a time-based retention policy when you upload a blob, follow these 1. Navigate to the desired container, and select **Upload**. 1. In the **Upload** blob dialog, expand the **Advanced** section. -1. Configure the time-based retention policy for the new blob in the **Retention policy** field. 
If there is a default policy configured for the container, that policy is selected by default. You can also specify a custom policy for the blob. +1. Configure the time-based retention policy for the new blob in the **Retention policy** field. If there's a default policy configured for the container, that policy is selected by default. You can also specify a custom policy for the blob. :::image type="content" source="media/immutable-policy-configure-version-scope/configure-retention-policy-blob-upload.png" alt-text="Screenshot showing options for configuring retention policy on blob upload in Azure portal"::: ## Modify or delete an unlocked retention policy -You can modify an unlocked time-based retention policy to shorten or lengthen the retention interval. You can also delete an unlocked policy. Editing or deleting an unlocked time-based retention policy for a blob version does not affect policies in effect for any other versions. If there is a default time-based retention policy in effect for the container, then the blob version with the modified or deleted policy will no longer inherit from the container. +You can modify an unlocked time-based retention policy to shorten or lengthen the retention interval. You can also delete an unlocked policy. Editing or deleting an unlocked time-based retention policy for a blob version doesn't affect policies in effect for any other versions. If there's a default time-based retention policy in effect for the container, then the blob version with the modified or deleted policy will no longer inherit from the container. ### [Portal](#tab/azure-portal) @@ -493,9 +493,9 @@ az storage blob immutability-policy delete \ ## Lock a time-based retention policy -When you have finished testing a time-based retention policy, you can lock the policy. A locked policy is compliant with SEC 17a-4(f) and other regulatory compliance. You can lengthen the retention interval for a locked policy up to five times, but you cannot shorten it. +When you have finished testing a time-based retention policy, you can lock the policy. A locked policy is compliant with SEC 17a-4(f) and other regulatory compliance. You can lengthen the retention interval for a locked policy up to five times, but you can't shorten it. -After a policy is locked, you cannot delete it. However, you can delete the blob after the retention interval has expired. +After a policy is locked, you can't delete it. However, you can delete the blob after the retention interval has expired. ### [Portal](#tab/azure-portal) diff --git a/articles/storage/blobs/storage-quickstart-blobs-go.md b/articles/storage/blobs/storage-quickstart-blobs-go.md index 7717ea853d76..d2c832f001ea 100644 --- a/articles/storage/blobs/storage-quickstart-blobs-go.md +++ b/articles/storage/blobs/storage-quickstart-blobs-go.md @@ -66,7 +66,7 @@ Run the following AzureCli command to assign the storage account permissions: az role assignment create --assignee "" --role "Storage Blob Data Contributor" --scope "" ``` -Learn more about Azure's built-in RBAC roles, check out [Built-in roles](/azure/role-based-access-control/built-in-roles). +Learn more about Azure's built-in RBAC roles, check out [Built-in roles](../../role-based-access-control/built-in-roles.md). > Note: Azure Cli has built in helper fucntions that retrieve the storage access keys when permissions are not detected. That functionally does not transfer to the DefaultAzureCredential, which is the reason for assiging RBAC roles to your account. 
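The `--assignee` and `--scope` values in the role assignment command above are left as placeholders. As a minimal sketch only, assuming a hypothetical resource group named `myResourceGroup` and storage account named `mystorageaccount` (recent Azure CLI versions return the signed-in user's object ID in the `id` property; older versions use `objectId`), a filled-in assignment might look like this:

```bash
# Sketch only: substitute your own subscription ID, resource group, and storage account names.
assigneeObjectId=$(az ad signed-in-user show --query id --output tsv)

az role assignment create \
    --assignee "$assigneeObjectId" \
    --role "Storage Blob Data Contributor" \
    --scope "/subscriptions/<subscription-id>/resourceGroups/myResourceGroup/providers/Microsoft.Storage/storageAccounts/mystorageaccount"
```

Role assignments can take a few minutes to propagate before `DefaultAzureCredential` can use them.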
@@ -271,4 +271,4 @@ See these other resources for Go development with Blob storage: ## Next steps -In this quickstart, you learned how to transfer files between a local disk and Azure blob storage using Go. For more information about the Azure Storage Blob SDK, view the [Source Code](https://github.com/Azure/azure-sdk-for-go/tree/main/sdk/storage/azblob) and [API Reference](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob). +In this quickstart, you learned how to transfer files between a local disk and Azure blob storage using Go. For more information about the Azure Storage Blob SDK, view the [Source Code](https://github.com/Azure/azure-sdk-for-go/tree/main/sdk/storage/azblob) and [API Reference](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob). \ No newline at end of file diff --git a/articles/storage/common/storage-network-security.md b/articles/storage/common/storage-network-security.md index e9e95f3fbcb5..f5b668184dab 100644 --- a/articles/storage/common/storage-network-security.md +++ b/articles/storage/common/storage-network-security.md @@ -458,15 +458,12 @@ You can manage IP network rules for storage accounts through the Azure portal, P -## Grant access from Azure resource instances (preview) +## Grant access from Azure resource instances In some cases, an application might depend on Azure resources that cannot be isolated through a virtual network or an IP address rule. However, you'd still like to secure and restrict storage account access to only your application's Azure resources. You can configure storage accounts to allow access to specific resource instances of some Azure services by creating a resource instance rule. The types of operations that a resource instance can perform on storage account data is determined by the Azure role assignments of the resource instance. Resource instances must be from the same tenant as your storage account, but they can belong to any subscription in the tenant. -> [!NOTE] -> This feature is in public preview and is available in all public cloud regions. - ### [Portal](#tab/azure-portal) You can add or remove resource network rules in the Azure portal. @@ -494,22 +491,6 @@ You can use PowerShell commands to add or remove resource network rules. > [!IMPORTANT] > Be sure to [set the default rule](#change-the-default-network-access-rule) to **deny**, or network rules have no effect. -#### Install the preview module - -Install the latest version of the PowershellGet module. Then, close and reopen the PowerShell console. - -```powershell -install-Module PowerShellGet –Repository PSGallery –Force -``` - -Install **Az. Storage** preview module. - -```powershell -Install-Module Az.Storage -Repository PsGallery -RequiredVersion 3.0.1-preview -AllowClobber -AllowPrerelease -Force -``` - -For more information about how to install PowerShell modules, see [Install the Azure PowerShell module](/powershell/azure/install-az-ps) - #### Grant access Add a network rule that grants access from a resource instance. @@ -574,24 +555,6 @@ $rule.ResourceAccessRules You can use Azure CLI commands to add or remove resource network rules. -#### Install the preview extension - -1. Open the [Azure Cloud Shell](../../cloud-shell/overview.md), or if you've [installed](/cli/azure/install-azure-cli) the Azure CLI locally, open a command console application such as Windows PowerShell. - -2. Then, verify that the version of Azure CLI that you have installed is `2.13.0` or higher by using the following command. 
- - ```azurecli - az --version - ``` - - If your version of Azure CLI is lower than `2.13.0`, then install a later version. See [Install the Azure CLI](/cli/azure/install-azure-cli). - -3. Type the following command to install the preview extension. - - ```azurecli - az extension add -n storage-preview - ``` - #### Grant access Add a network rule that grants access from a resource instance. @@ -674,7 +637,7 @@ If your account does not have the hierarchical namespace feature enabled on it, You can use the same technique for an account that has the hierarchical namespace feature enable on it. However, you don't have to assign an Azure role if you add the managed identity to the access control list (ACL) of any directory or blob contained in the storage account. In that case, the scope of access for the instance corresponds to the directory or file to which the managed identity has been granted access. You can also combine Azure roles and ACLs together. To learn more about how to combine them together to grant access, see [Access control model in Azure Data Lake Storage Gen2](../blobs/data-lake-storage-access-control-model.md). > [!TIP] -> The recommended way to grant access to specific resources is to use resource instance rules. To grant access to specific resource instances, see the [Grant access from Azure resource instances (preview)](#grant-access-specific-instances) section of this article. +> The recommended way to grant access to specific resources is to use resource instance rules. To grant access to specific resource instances, see the [Grant access from Azure resource instances](#grant-access-specific-instances) section of this article. | Service | Resource Provider Name | Purpose | | :----------------------------- | :------------------------------------- | :----------------- | diff --git a/articles/storage/common/storage-redundancy.md b/articles/storage/common/storage-redundancy.md index 950681ce6021..b795feab9d23 100644 --- a/articles/storage/common/storage-redundancy.md +++ b/articles/storage/common/storage-redundancy.md @@ -7,7 +7,7 @@ author: tamram ms.service: storage ms.topic: conceptual -ms.date: 05/12/2022 +ms.date: 05/24/2022 ms.author: tamram ms.subservice: common ms.custom: references_regions @@ -108,6 +108,7 @@ Premium block blobs are available in a subset of Azure regions: - (North America) East US - (North America) East US 2 - (North America) West US 2 +- (North America) South Central US - (South America) Brazil South #### Premium file share accounts @@ -168,7 +169,12 @@ Only standard general-purpose v2 storage accounts support GZRS. GZRS is supporte ## Read access to data in the secondary region -Geo-redundant storage (with GRS or GZRS) replicates your data to another physical location in the secondary region to protect against regional outages. However, that data is available to be read only if the customer or Microsoft initiates a failover from the primary to secondary region. When you enable read access to the secondary region, your data is always available to be read, including in a situation where the primary region becomes unavailable. For read access to the secondary region, enable read-access geo-redundant storage (RA-GRS) or read-access geo-zone-redundant storage (RA-GZRS). +Geo-redundant storage (with GRS or GZRS) replicates your data to another physical location in the secondary region to protect against regional outages. 
With an account configured for GRS or GZRS, data in the secondary region is not directly accessible to users or applications, unless a failover occurs. The failover process updates the DNS entry provided by Azure Storage so that the secondary endpoint becomes the new primary endpoint for your storage account. During the failover process, your data is inaccessible. After the failover is complete, you can read and write data to the new primary region. For more information about failover and disaster recovery, see [How an account failover works](storage-disaster-recovery-guidance.md#how-an-account-failover-works). + +If your applications require high availability, then you can configure your storage account for read access to the secondary region. When you enable read access to the secondary region, then your data is always available to be read from the secondary, including in a situation where the primary region becomes unavailable. Read-access geo-redundant storage (RA-GRS) or read-access geo-zone-redundant storage (RA-GZRS) configurations permit read access to the secondary region. + +> [!CAUTION] +> Because data is replicated asynchronously from the primary to the secondary region, the secondary region is typically behind the primary region in terms of write operations. If a disaster were to strike the primary region, it's likely that some data would be lost. For more information about how to plan for potential data loss, see [Anticipate data loss](storage-disaster-recovery-guidance.md#anticipate-data-loss). > [!NOTE] > Azure Files does not support read-access geo-redundant storage (RA-GRS) or read-access geo-zone-redundant storage (RA-GZRS). diff --git a/articles/storage/file-sync/file-sync-deployment-guide.md b/articles/storage/file-sync/file-sync-deployment-guide.md index e3bd9b3b6050..f7201ad27965 100644 --- a/articles/storage/file-sync/file-sync-deployment-guide.md +++ b/articles/storage/file-sync/file-sync-deployment-guide.md @@ -4,7 +4,7 @@ description: Learn how to deploy Azure File Sync, from start to finish, using th author: khdownie ms.service: storage ms.topic: how-to -ms.date: 04/12/2022 +ms.date: 05/24/2022 ms.author: kendownie ms.subservice: files ms.custom: devx-track-azurepowershell, devx-track-azurecli @@ -483,9 +483,6 @@ Currently, pre-seeding approach has a few limitations - ## Self-service restore through Previous Versions and VSS (Volume Shadow Copy Service) -> [!IMPORTANT] -> The following information can only be used with version 9 (or above) of the storage sync agent. Versions lower than 9 will not have the StorageSyncSelfService cmdlets. - Previous Versions is a Windows feature that allows you to utilize server-side VSS snapshots of a volume to present restorable versions of a file to an SMB client. This enables a powerful scenario, commonly referred to as self-service restore, directly for information workers instead of depending on the restore from an IT admin. 
diff --git a/articles/storage/file-sync/file-sync-disaster-recovery-best-practices.md b/articles/storage/file-sync/file-sync-disaster-recovery-best-practices.md index 726007b71232..aa3347af0b5a 100644 --- a/articles/storage/file-sync/file-sync-disaster-recovery-best-practices.md +++ b/articles/storage/file-sync/file-sync-disaster-recovery-best-practices.md @@ -4,7 +4,7 @@ description: Learn about best practices for disaster recovery with Azure File Sy author: khdownie ms.service: storage ms.topic: how-to -ms.date: 08/18/2021 +ms.date: 05/24/2022 ms.author: kendownie ms.subservice: files --- @@ -54,7 +54,7 @@ If you enable cloud tiering, don't implement an on-premises backup solution. Wit If you decide to use an on-premises backup solution, backups should be performed on a server in the sync group with cloud tiering disabled. When performing a restore, use the volume-level or file-level restore options. Files restored using the file-level restore option will sync to all endpoints in the sync group and existing files will be replaced with the version restored from backup. Volume-level restores won't replace newer file versions in the cloud endpoint or other server endpoints. -In Azure File Sync agent version 9 and above, [Volume Shadow Copy Service (VSS) snapshots](file-sync-deployment-guide.md#self-service-restore-through-previous-versions-and-vss-volume-shadow-copy-service) (including the **Previous Versions** tab) are supported on volumes with cloud tiering enabled. This allows you to perform self-service restores instead of relying on an admin to perform restores for you. However, you must enable previous version compatibility through PowerShell, which will increase your snapshot storage costs. VSS snapshots don't protect against disasters on the server endpoint itself, so they should only be used alongside cloud-side backups. For details, see [Self Service restore through Previous Versions and VSS](file-sync-deployment-guide.md#self-service-restore-through-previous-versions-and-vss-volume-shadow-copy-service). +[Volume Shadow Copy Service (VSS) snapshots](file-sync-deployment-guide.md#self-service-restore-through-previous-versions-and-vss-volume-shadow-copy-service) (including the **Previous Versions** tab) are supported on volumes with cloud tiering enabled. This allows you to perform self-service restores instead of relying on an admin to perform restores for you. However, you must enable previous version compatibility through PowerShell, which will increase your snapshot storage costs. VSS snapshots don't protect against disasters on the server endpoint itself, so they should only be used alongside cloud-side backups. For details, see [Self Service restore through Previous Versions and VSS](file-sync-deployment-guide.md#self-service-restore-through-previous-versions-and-vss-volume-shadow-copy-service). ## Data redundancy diff --git a/articles/storage/file-sync/file-sync-planning.md b/articles/storage/file-sync/file-sync-planning.md index 5d89b3c8780f..540da4c1346b 100644 --- a/articles/storage/file-sync/file-sync-planning.md +++ b/articles/storage/file-sync/file-sync-planning.md @@ -211,8 +211,8 @@ In this case, Azure File Sync would need about 209,500,000 KiB (209.5 GiB) of sp > The Azure File Sync agent must be installed on every node in a Failover Cluster for sync to work correctly. 
### Data Deduplication -**Windows Server 2016 and Windows Server 2019** -Data Deduplication is supported irrespective of whether cloud tiering is enabled or disabled on one or more server endpoints on the volume for Windows Server 2016 and Windows Server 2019. Enabling Data Deduplication on a volume with cloud tiering enabled lets you cache more files on-premises without provisioning more storage. +**Windows Server 2022, Windows Server 2019, and Windows Server 2016** +Data Deduplication is supported irrespective of whether cloud tiering is enabled or disabled on one or more server endpoints on the volume for Windows Server 2016, Windows Server 2019, and Windows Server 2022. Enabling Data Deduplication on a volume with cloud tiering enabled lets you cache more files on-premises without provisioning more storage. When Data Deduplication is enabled on a volume with cloud tiering enabled, Dedup optimized files within the server endpoint location will be tiered similar to a normal file based on the cloud tiering policy settings. Once the Dedup optimized files have been tiered, the Data Deduplication garbage collection job will run automatically to reclaim disk space by removing unnecessary chunks that are no longer referenced by other files on the volume. @@ -232,9 +232,9 @@ Azure File Sync does not support Data Deduplication and cloud tiering on the sam - For ongoing Deduplication optimization jobs, cloud tiering with date policy will get delayed by the Data Deduplication [MinimumFileAgeDays](/powershell/module/deduplication/set-dedupvolume) setting, if the file is not already tiered. - Example: If the MinimumFileAgeDays setting is seven days and cloud tiering date policy is 30 days, the date policy will tier files after 37 days. - Note: Once a file is tiered by Azure File Sync, the Deduplication optimization job will skip the file. -- If a server running Windows Server 2012 R2 with the Azure File Sync agent installed is upgraded to Windows Server 2016 or Windows Server 2019, the following steps must be performed to support Data Deduplication and cloud tiering on the same volume: +- If a server running Windows Server 2012 R2 with the Azure File Sync agent installed is upgraded to Windows Server 2016, Windows Server 2019 or Windows Server 2022, the following steps must be performed to support Data Deduplication and cloud tiering on the same volume: - Uninstall the Azure File Sync agent for Windows Server 2012 R2 and restart the server. - - Download the Azure File Sync agent for the new server operating system version (Windows Server 2016 or Windows Server 2019). + - Download the Azure File Sync agent for the new server operating system version (Windows Server 2016, Windows Server 2019, or Windows Server 2022). - Install the Azure File Sync agent and restart the server. Note: The Azure File Sync configuration settings on the server are retained when the agent is uninstalled and reinstalled. 
diff --git a/articles/storage/files/files-nfs-protocol.md b/articles/storage/files/files-nfs-protocol.md index 671666cd7e50..bb2f001130e8 100644 --- a/articles/storage/files/files-nfs-protocol.md +++ b/articles/storage/files/files-nfs-protocol.md @@ -4,7 +4,7 @@ description: Learn about file shares hosted in Azure Files using the Network Fil author: khdownie ms.service: storage ms.topic: conceptual -ms.date: 04/19/2022 +ms.date: 05/25/2022 ms.author: kendownie ms.subservice: files ms.custom: references_regions @@ -16,7 +16,7 @@ Azure Files offers two industry-standard file system protocols for mounting Azur This article covers NFS Azure file shares. For information about SMB Azure file shares, see [SMB file shares in Azure Files](files-smb-protocol.md). > [!IMPORTANT] -> Before using NFS file shares for production, see the [Troubleshoot Azure NFS file shares](storage-troubleshooting-files-nfs.md) article for a list of known issues. +> NFS Azure file shares are not supported for Windows clients. Before using NFS Azure file shares for production, see the [Troubleshoot NFS Azure file shares](storage-troubleshooting-files-nfs.md) article for a list of known issues. ## Common scenarios NFS file shares are often used in the following scenarios: @@ -88,11 +88,11 @@ NFS Azure file shares are only offered on premium file shares, which store data ## Workloads > [!IMPORTANT] -> Before using NFS file shares for production, see the [Troubleshoot Azure NFS file shares](storage-troubleshooting-files-nfs.md) article for a list of known issues. +> Before using NFS Azure file shares for production, see [Troubleshoot NFS Azure file shares](storage-troubleshooting-files-nfs.md) for a list of known issues. NFS has been validated to work well with workloads such as SAP application layer, database backups, database replication, messaging queues, home directories for general purpose file servers, and content repositories for application workloads. -The following workloads have known issues. See the [Troubleshoot Azure NFS file shares](storage-troubleshooting-files-nfs.md) article for list of known issues: +The following workloads have known issues: - Oracle Database will experience incompatibility with its dNFS feature. diff --git a/articles/storage/files/storage-files-quick-create-use-linux.md b/articles/storage/files/storage-files-quick-create-use-linux.md index 2856e38a6f1c..324831335a09 100644 --- a/articles/storage/files/storage-files-quick-create-use-linux.md +++ b/articles/storage/files/storage-files-quick-create-use-linux.md @@ -192,7 +192,7 @@ Now that you've created an NFS share, to use it you have to mount it on your Lin 1. You should see **Connect to this NFS share from Linux** along with sample commands to use NFS on your Linux distribution and a provided mounting script. > [!IMPORTANT] - > The provided mounting script will mount the NFS share only until the Linux machine is rebooted. To automatically mount the share every time the machine reboots, use a [static mount with /etc/fstab](storage-how-to-use-files-linux.md#static-mount-with-etcfstab). + > The provided mounting script will mount the NFS share only until the Linux machine is rebooted. To automatically mount the share every time the machine reboots, [add an entry in /etc/fstab](storage-how-to-use-files-linux.md#static-mount-with-etcfstab). For more information, enter the command `man fstab` from the Linux command line. 
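As a minimal sketch of such a static mount, assuming a hypothetical storage account named `mystorageaccount`, a share named `myshare`, and a local mount point of `/mount/myshare`, the commands might look like this:

```bash
# Sketch only: replace mystorageaccount and myshare with your own storage account and share names.
sudo mkdir -p /mount/myshare

# Add a static mount entry so the share is remounted automatically after a reboot.
echo "mystorageaccount.file.core.windows.net:/mystorageaccount/myshare  /mount/myshare  nfs  vers=4,minorversion=1,sec=sys  0  0" | sudo tee -a /etc/fstab

# Mount everything listed in /etc/fstab without rebooting.
sudo mount -a
```

The export path for an NFS Azure file share takes the form `<storage-account>.file.core.windows.net:/<storage-account>/<share-name>`.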
:::image type="content" source="media/storage-files-quick-create-use-linux/mount-nfs-share.png" alt-text="Screenshot showing how to connect to an N F S file share from Linux using a provided mounting script." lightbox="media/storage-files-quick-create-use-linux/mount-nfs-share.png" border="true"::: diff --git a/articles/storage/files/storage-troubleshoot-windows-file-connection-problems.md b/articles/storage/files/storage-troubleshoot-windows-file-connection-problems.md index 8c00f42741cd..cbd3bf581888 100644 --- a/articles/storage/files/storage-troubleshoot-windows-file-connection-problems.md +++ b/articles/storage/files/storage-troubleshoot-windows-file-connection-problems.md @@ -4,7 +4,7 @@ description: Troubleshooting Azure Files problems in Windows. See common issues author: khdownie ms.service: storage ms.topic: troubleshooting -ms.date: 01/31/2022 +ms.date: 05/26/2022 ms.author: kendownie ms.subservice: files ms.custom: devx-track-azurepowershell @@ -113,11 +113,11 @@ TcpTestSucceeded : True ### Solution for cause 1 -#### Solution 1 — Use Azure File Sync -Azure File Sync can transform your on-premises Windows Server into a quick cache of your Azure file share. You can use any protocol that's available on Windows Server to access your data locally, including SMB, NFS, and FTPS. Azure File Sync works over port 443 and can thus be used as a workaround to access Azure Files from clients that have port 445 blocked. [Learn how to setup Azure File Sync](../file-sync/file-sync-extend-servers.md). +#### Solution 1 — Use Azure File Sync as a QUIC endpoint +Azure File Sync can be used as a workaround to access Azure Files from clients that have port 445 blocked. Although Azure Files doesn't directly support SMB over QUIC, Windows Server 2022 Azure Edition does support the QUIC protocol. You can create a lightweight cache of your Azure file shares on a Windows Server 2022 Azure Edition VM using Azure File Sync. This uses port 443, which is widely open outbound to support HTTPS, instead of port 445. To learn more about this option, see [SMB over QUIC with Azure File Sync](storage-files-networking-overview.md#smb-over-quic). -#### Solution 2 — Use VPN -By Setting up a VPN to your specific Storage Account, the traffic will go through a secure tunnel as opposed to over the internet. Follow the [instructions to setup VPN](storage-files-configure-p2s-vpn-windows.md) to access Azure Files from Windows. +#### Solution 2 — Use VPN or ExpressRoute +By setting up a VPN or ExpressRoute from on-premises to your Azure storage account, with Azure Files exposed on your internal network using private endpoints, the traffic will go through a secure tunnel as opposed to over the internet. Follow the [instructions to setup VPN](storage-files-configure-p2s-vpn-windows.md) to access Azure Files from Windows. #### Solution 3 — Unblock port 445 with help of your ISP/IT Admin Work with your IT department or ISP to open port 445 outbound to [Azure IP ranges](https://www.microsoft.com/download/details.aspx?id=41653). diff --git a/articles/storage/files/storage-troubleshooting-files-nfs.md b/articles/storage/files/storage-troubleshooting-files-nfs.md index 483b7bf3710d..6ce50986b03d 100644 --- a/articles/storage/files/storage-troubleshooting-files-nfs.md +++ b/articles/storage/files/storage-troubleshooting-files-nfs.md @@ -1,18 +1,21 @@ --- -title: Troubleshoot Azure NFS file share problems - Azure Files -description: Troubleshoot Azure NFS file share problems. 
+title: Troubleshoot NFS file share problems - Azure Files +description: Troubleshoot NFS Azure file share problems. author: khdownie ms.service: storage ms.topic: troubleshooting -ms.date: 09/15/2020 +ms.date: 05/25/2022 ms.author: kendownie ms.subservice: files ms.custom: references_regions, devx-track-azurepowershell --- -# Troubleshoot Azure NFS file share problems +# Troubleshoot NFS Azure file share problems -This article lists some common problems and known issues related to Azure NFS file shares. It provides potential causes and workarounds when these problems are encountered. +This article lists some common problems and known issues related to NFS Azure file shares. It provides potential causes and workarounds when these problems are encountered. + +> [!IMPORTANT] +> NFS Azure file shares are not supported for Windows clients. ## Applies to | File share type | SMB | NFS | @@ -51,7 +54,7 @@ NFS is only available on storage accounts with the following configuration: Follow the instructions in our article: [How to create an NFS share](storage-files-how-to-create-nfs-shares.md). -## Cannot connect to or mount an Azure NFS file share +## Cannot connect to or mount an NFS Azure file share ### Cause 1: Request originates from a client in an untrusted network/untrusted IP diff --git a/articles/storage/queues/storage-ruby-how-to-use-queue-storage.md b/articles/storage/queues/storage-ruby-how-to-use-queue-storage.md index bc7214c8dbb3..fd5d052315b8 100644 --- a/articles/storage/queues/storage-ruby-how-to-use-queue-storage.md +++ b/articles/storage/queues/storage-ruby-how-to-use-queue-storage.md @@ -167,4 +167,4 @@ Now that you've learned the basics of Queue Storage, follow these links to learn - Visit the [Azure Storage team blog](/archive/blogs/windowsazurestorage/) - Visit the [Azure SDK for Ruby](https://github.com/WindowsAzure/azure-sdk-for-ruby) repository on GitHub -For a comparison between Azure Queue Storage discussed in this article and Azure Service Bus queues discussed in [How to use Service Bus queues](/azure/service-bus-messaging/service-bus-quickstart-portal), see [Azure Queue Storage and Service Bus queues - compared and contrasted](../../service-bus-messaging/service-bus-azure-and-service-bus-queues-compared-contrasted.md) +For a comparison between Azure Queue Storage discussed in this article and Azure Service Bus queues discussed in [How to use Service Bus queues](../../service-bus-messaging/service-bus-quickstart-portal.md), see [Azure Queue Storage and Service Bus queues - compared and contrasted](../../service-bus-messaging/service-bus-azure-and-service-bus-queues-compared-contrasted.md) \ No newline at end of file diff --git a/articles/storage/solution-integration/validated-partners/backup-archive-disaster-recovery/tiger-bridge-cdp-guide.md b/articles/storage/solution-integration/validated-partners/backup-archive-disaster-recovery/tiger-bridge-cdp-guide.md index 3b1b69c352a6..c3c1d049edae 100644 --- a/articles/storage/solution-integration/validated-partners/backup-archive-disaster-recovery/tiger-bridge-cdp-guide.md +++ b/articles/storage/solution-integration/validated-partners/backup-archive-disaster-recovery/tiger-bridge-cdp-guide.md @@ -12,7 +12,7 @@ ms.subservice: partner # Tiger Bridge archiving with continuous data protection and disaster recovery -This article will guide you to set up Tiger Bridge data management system with Azure Blob Storage. 
Tiger Bridge Continuous data protection (CDP) integrates with [Soft Delete](/azure/storage/blobs/soft-delete-blob-overview) and [Versioning](/azure/storage/blobs/versioning-overview) to achieve a complete Continuous Data Protection solution. It applies policies to move data between [Azure Blob tiers](/azure/storage/blobs/access-tiers-overview) for optimal cost. Continuous data protection allows customers to have a real-time file-based backup with snapshots to achieve near zero RPO. CDP enables customers to protect their assets with minimum resources. Optionally, it can be used in WORM scenario using [immutable storage](/azure/storage/blobs/immutable-storage-overview). +This article will guide you to set up Tiger Bridge data management system with Azure Blob Storage. Tiger Bridge Continuous data protection (CDP) integrates with [Soft Delete](../../../blobs/soft-delete-blob-overview.md) and [Versioning](../../../blobs/versioning-overview.md) to achieve a complete Continuous Data Protection solution. It applies policies to move data between [Azure Blob tiers](../../../blobs/access-tiers-overview.md) for optimal cost. Continuous data protection allows customers to have a real-time file-based backup with snapshots to achieve near zero RPO. CDP enables customers to protect their assets with minimum resources. Optionally, it can be used in WORM scenario using [immutable storage](../../../blobs/immutable-storage-overview.md). In addition, Tiger Bridge provides easy and efficient Disaster Recovery. It can be combined with [Microsoft DFSR](/windows-server/storage/dfs-replication/dfsr-overview), but it isn't mandatory. It allows mirrored DR sites, or can be used with minimum storage DR sites (keeping only the most recent data on-prem plus). All the replicated files in Azure Blob Storage are stored as native objects, allowing the organization to access them without using Tiger Bridge. This approach prevents vendor locking. @@ -20,15 +20,15 @@ All the replicated files in Azure Blob Storage are stored as native objects, all :::image type="content" source="./media/tiger-bridge-cdp-guide/tiger-bridge-reference-architecture.png" alt-text="Tiger Bridge reference architecture."::: -More information on Tiger Bridge solution, and common use case can be read in [Tiger Bridge deployment guide](/azure/storage/solution-integration/validated-partners/primary-secondary-storage/tiger-bridge-deployment-guide). +More information on Tiger Bridge solution, and common use case can be read in [Tiger Bridge deployment guide](../primary-secondary-storage/tiger-bridge-deployment-guide.md). ## Before you begin -- **Refer to [Tiger Bridge deployment guide](/azure/storage/solution-integration/validated-partners/primary-secondary-storage/tiger-bridge-deployment-guide)**, it describes initial steps needed for setting up CDP. +- **Refer to [Tiger Bridge deployment guide](../primary-secondary-storage/tiger-bridge-deployment-guide.md)**, it describes initial steps needed for setting up CDP. - **Choose the right storage options**. When you use Azure as a backup target, you'll make use of [Azure Blob storage](https://azure.microsoft.com/services/storage/blobs/). Blob storage is optimized for storing massive amounts of unstructured data, which is data that doesn't adhere to any data model, or definition. It's durable, highly available, secure, and scalable. 
You can select the right storage for your workload by looking at two aspects: - - [Storage redundancy](/azure/storage/common/storage-redundancy) - - [Storage tier](/azure/storage/blobs/access-tiers-overview) + - [Storage redundancy](../../../common/storage-redundancy.md) + - [Storage tier](../../../blobs/access-tiers-overview.md) ### Sample backup to Azure cost model Subscription based model can be daunting to customers who are new to the cloud. While you pay for only the capacity used, you do also pay for transactions (read and write), and egress for data read back to your on-premises environment (depending on the network connection used). We recommend using the [Azure Pricing Calculator](https://azure.microsoft.com/pricing/calculator/) to perform what-if analysis. You can base the analysis on list pricing or on Azure Storage Reserved Capacity pricing, which can deliver up to 38% savings. Below is an example pricing exercise to model the monthly cost of backing up to Azure. @@ -44,13 +44,13 @@ Subscription based model can be daunting to customers who are new to the cloud. > This is only an example. Your pricing may vary due to activities not captured here. Estimate was generated with Azure Pricing Calculator using East US Pay-as-you-go pricing. It is based on a 32 MB block size which generates 65,536 PUT Requests (write transactions), per day. This example may not reflect current Azure pricing, or not be applicable towards your requirements. ## Prepare Azure Blob Storage -Refer to [Tiger Bridge deployment guide](/azure/storage/solution-integration/validated-partners/primary-secondary-storage/tiger-bridge-deployment-guide) +Refer to [Tiger Bridge deployment guide](../primary-secondary-storage/tiger-bridge-deployment-guide.md) ## Deploy Tiger Bridge Before you can install Tiger Bridge, you need to have a Windows file server installed, and fully functional. Windows server must have access to the storage account prepare in [previous step](#prepare-azure-blob-storage). ## Configure continuous data protection -1. Deploy Tiger Bridge solution as described in [standalone hybrid configuration](/azure/storage/solution-integration/validated-partners/primary-secondary-storage/tiger-bridge-deployment-guide#deploy-standalone-hybrid-configuration) (steps 1 to 4). +1. Deploy Tiger Bridge solution as described in [standalone hybrid configuration](../primary-secondary-storage/tiger-bridge-deployment-guide.md#deploy-standalone-hybrid-configuration) (steps 1 to 4). 1. Under Tiger Bridge settings, enable **Delete replica when source file is removed** and **Keep replica versions** :::image type="content" source="./media/tiger-bridge-cdp-guide/tiger-bridge-settings.png" alt-text="Screenshot that shows how to enable settings for CDP."::: 1. Set versioning policy either **By Age** or **By Count** @@ -71,7 +71,7 @@ Tiger Bridge can move a replicated file between Azure Blob Storage tiers to opti :::image type="content" source="./media/tiger-bridge-cdp-guide/tiger-bridge-pair-account.png" alt-text="Screenshot that shows how to pair a storage account with local source."::: - Change **Default access tier** to **Archive**. You can also select a default **[Rehydration priority](/azure/storage/blobs/archive-rehydrate-to-online-tier)**. + Change **Default access tier** to **Archive**. You can also select a default **[Rehydration priority](../../../blobs/archive-rehydrate-to-online-tier.md)**. 
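Tiger Bridge drives the tier change and any later rehydration for you through the configuration UI shown in the following screenshot. Purely to illustrate what a default access tier of **Archive** and a rehydration priority mean at the Azure Blob level, here's a minimal sketch; it assumes the `azure-storage-blob` v12 Python SDK, and the connection string, container, and blob names are hypothetical placeholders.

```python
# Illustration only; not part of the Tiger Bridge workflow.
# Assumes the azure-storage-blob v12 SDK; all names below are hypothetical placeholders.
from azure.storage.blob import BlobClient

blob = BlobClient.from_connection_string(
    conn_str="<storage-account-connection-string>",  # hypothetical placeholder
    container_name="tigerbridge-replicas",           # hypothetical container
    blob_name="projects/report.docx",                # hypothetical archived replica
)

# Rehydrate an archived blob by moving it back to the Hot tier.
# "High" priority is typically faster (and costs more) than the default "Standard".
blob.set_standard_blob_tier("Hot", rehydrate_priority="High")
```

Keep in mind that rehydration from the archive tier can take hours, which is why choosing a sensible default rehydration priority up front matters.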
:::image type="content" source="./media/tiger-bridge-cdp-guide/tiger-bridge-change-access-tier.png" alt-text="Screenshot that shows how to change a default access tier in Tiger Bridge Configuration."::: @@ -93,7 +93,7 @@ Tiger Bridge can be configured in Disaster Recovery mode. Typical configuration :::image type="content" source="./media/tiger-bridge-cdp-guide/tiger-bridge-dr-active-passive.png" alt-text="Architecture for Tiger Bridge in active - passive DR configuration."::: -1. Deploy and setup Tiger Bridge server on the primary and secondary site as instructed in [Tiger Bridge deployment guide](/azure/storage/solution-integration/validated-partners/primary-secondary-storage/tiger-bridge-deployment-guide#deploy-standalone-hybrid-configuration) for standalone hybrid configuration +1. Deploy and setup Tiger Bridge server on the primary and secondary site as instructed in [Tiger Bridge deployment guide](../primary-secondary-storage/tiger-bridge-deployment-guide.md#deploy-standalone-hybrid-configuration) for standalone hybrid configuration > [!NOTE] > Both Tiger Bridge servers on primary and secondary site must be connected to the same container and storage account. diff --git a/articles/stream-analytics/capture-event-hub-data-parquet.md b/articles/stream-analytics/capture-event-hub-data-parquet.md index 0b412407e1ff..ca00316221c9 100644 --- a/articles/stream-analytics/capture-event-hub-data-parquet.md +++ b/articles/stream-analytics/capture-event-hub-data-parquet.md @@ -39,7 +39,7 @@ Use the following steps to configure a Stream Analytics job to capture data in A 1. For streaming blobs, the directory path pattern is expected to be a dynamic value. It's required for the date to be a part of the file path for the blob – referenced as `{date}`. To learn about custom path patterns, see to [Azure Stream Analytics custom blob output partitioning](stream-analytics-custom-path-patterns-blob-storage-output.md). :::image type="content" source="./media/capture-event-hub-data-parquet/blob-configuration.png" alt-text="First screenshot showing the Blob window where you edit a blob's connection configuration." lightbox="./media/capture-event-hub-data-parquet/blob-configuration.png" ::: 1. Select **Connect** -1. When the connection is established, you will see fields that are present in the output data. +1. When the connection is established, you'll see fields that are present in the output data. 1. Select **Save** on the command bar to save your configuration. 1. Select **Start** on the command bar to start the streaming flow to capture data. Then in the Start Stream Analytics job window: 1. Choose the output start time. @@ -47,10 +47,20 @@ Use the following steps to configure a Stream Analytics job to capture data in A 1. In the **Choose Output data error handling** list, select the behavior you want when the output of the job fails due to data error. Select **Retry** to have the job retry until it writes successfully or select another option. :::image type="content" source="./media/capture-event-hub-data-parquet/start-job.png" alt-text="Screenshot showing the Start Stream Analytics job window where you set the output start time, streaming units, and error handling." lightbox="./media/capture-event-hub-data-parquet/start-job.png" ::: +## Verify output +Verify that the Parquet files are generated in the Azure Data Lake Storage container. 
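In addition to browsing the container in the Azure portal (as shown in the following screenshot), you can read the captured files back with Spark to confirm they contain data. Here's a minimal sketch; it assumes an existing PySpark session (for example, a Synapse Spark pool notebook where `spark` is predefined), and the storage account and container names are hypothetical placeholders.

```python
# Minimal sketch: read the captured Parquet files back and inspect them.
# Assumes a Synapse Spark (or other PySpark) session where `spark` already exists;
# the account and container names below are hypothetical placeholders.
account = "contosodatalake"    # hypothetical ADLS Gen2 account
container = "capturedevents"   # hypothetical capture container

# The {date}/{time} path pattern produces folders such as 2022-05-25/02/.
path = f"abfss://{container}@{account}.dfs.core.windows.net/*/*"

df = spark.read.load(path, format="parquet")
df.printSchema()
print(df.count(), "events captured so far")
```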
+ +:::image type="content" source="./media/capture-event-hub-data-parquet/verify-captured-data.png" alt-text="Screenshot showing the generated Parquet files in the ADLS container." lightbox="./media/capture-event-hub-data-parquet/verify-captured-data.png" ::: + + The new job is shown on the **Stream Analytics jobs** tab. Select **Open metrics** to monitor it. :::image type="content" source="./media/capture-event-hub-data-parquet/open-metrics-link.png" alt-text="Screenshot showing Open Metrics link selected." lightbox="./media/capture-event-hub-data-parquet/open-metrics-link.png" ::: +Here's an example screenshot of metrics showing input and output events. + +:::image type="content" source="./media/capture-event-hub-data-parquet/job-metrics.png" alt-text="Screenshot showing metrics of the Stream Analytics job." lightbox="./media/capture-event-hub-data-parquet/job-metrics.png" ::: + ## Next steps Now you know how to use the Stream Analytics no code editor to create a job that captures Event Hubs data to Azure Data Lake Storage Gen2 in Parquet format. Next, you can learn more about Azure Stream Analytics and how to monitor the job that you created. diff --git a/articles/stream-analytics/event-hubs-parquet-capture-tutorial.md b/articles/stream-analytics/event-hubs-parquet-capture-tutorial.md index 97c44c6b0953..79fdc98cb987 100644 --- a/articles/stream-analytics/event-hubs-parquet-capture-tutorial.md +++ b/articles/stream-analytics/event-hubs-parquet-capture-tutorial.md @@ -5,7 +5,7 @@ author: sidramadoss ms.author: sidram ms.service: stream-analytics ms.topic: how-to -ms.date: 05/23/2022 +ms.date: 05/25/2022 ms.custom: seodec18 --- @@ -27,40 +27,54 @@ In this tutorial, you learn how to: Before you start, make sure you've completed the following steps: * If you don't have an Azure subscription, create a [free account](https://azure.microsoft.com/free/). -* Deploy the TollApp event generator to Azure, use this link to [Deploy TollApp Azure Template](https://portal.azure.com/#create/Microsoft.Template/uri/https%3A%2F%2Fraw.githubusercontent.com%2FAzure%2Fazure-stream-analytics%2Fmaster%2FSamples%2FTollApp%2FVSProjects%2FTollAppDeployment%2Fazuredeploy.json). Set the 'interval' parameter to 1. And use a new resource group for this. +* Deploy the TollApp event generator to Azure, use this link to [Deploy TollApp Azure Template](https://portal.azure.com/#create/Microsoft.Template/uri/https%3A%2F%2Fraw.githubusercontent.com%2FAzure%2Fazure-stream-analytics%2Fmaster%2FSamples%2FTollApp%2FVSProjects%2FTollAppDeployment%2Fazuredeploy.json). Set the 'interval' parameter to 1. And use a new resource group for this step. * Create an [Azure Synapse Analytics workspace](../synapse-analytics/get-started-create-workspace.md) with a Data Lake Storage Gen2 account. ## Use no code editor to create a Stream Analytics job 1. Locate the Resource Group in which the TollApp event generator was deployed. -2. Select the Azure Event Hubs namespace. And then under the Event Hubs section, select **entrystream** instance. -3. Go to **Process data** under Features section and then click **start** on the Capture in parquet format template. -[ ![Screenshot of start capture experience from process data blade.](./media/stream-analytics-no-code/parquet-capture-start.png) ](./media/stream-analytics-no-code/parquet-capture-start.png#lightbox) -4. Name your job **parquetcapture** and select **Create**. -5. 
Configure your event hub input by specifying - * Consumer Group: Default - * Serialization type of your input data: JSON - * Authentication mode that the job will use to connect to your event hub: Connection String defaults - * Click **Connect** -6. Within few seconds, you'll see sample input data and the schema. You can choose to drop fields, rename fields or change data type. -[![Screenshot of event hub data and schema in no code editor.](./media/stream-analytics-no-code/event-hub-data-preview.png)](./media/stream-analytics-no-code/event-hub-data-preview.png#lightbox) -7. Click the Azure Data Lake Storage Gen2 tile on your canvas and configure it by specifying +2. Select the Azure Event Hubs **namespace**. +1. On the **Event Hubs Namespace** page, select **Event Hubs** under **Entities** on the left menu. +1. Select **entrystream** instance. + + :::image type="content" source="./media/stream-analytics-no-code/select-event-hub.png" alt-text="Screenshot showing the selection of the event hub." lightbox="./media/stream-analytics-no-code/select-event-hub.png"::: +3. On the **Event Hubs instance** page, select **Process data** in the **Features** section on the left menu. +1. Select **Start** on the **Capture data to ADLS Gen2 in Parquet format** tile. + + :::image type="content" source="./media/stream-analytics-no-code/parquet-capture-start.png" alt-text="Screenshot showing the selection of the **Capture data to ADLS Gen2 in Parquet format** tile." lightbox="./media/stream-analytics-no-code/parquet-capture-start.png"::: +1. Name your job **parquetcapture** and select **Create**. + + :::image type="content" source="./media/stream-analytics-no-code/new-stream-analytics-job.png" alt-text="Screenshot of the New Stream Analytics job page." lightbox="./media/stream-analytics-no-code/new-stream-analytics-job.png"::: +1. On the **event hub** configuration page, confirm the following settings, and then select **Connect**. + - *Consumer Group*: Default + - *Serialization type* of your input data: JSON + - *Authentication mode* that the job will use to connect to your event hub: Connection string. + + :::image type="content" source="./media/event-hubs-parquet-capture-tutorial/event-hub-configuration.png" alt-text="Screenshot of the configuration page for your event hub." lightbox="./media/event-hubs-parquet-capture-tutorial/event-hub-configuration.png"::: +1. Within a few seconds, you'll see sample input data and the schema. You can choose to drop fields, rename fields, or change data types. + + :::image type="content" source="./media/event-hubs-parquet-capture-tutorial/data-preview.png" alt-text="Screenshot showing the fields and preview of data." lightbox="./media/event-hubs-parquet-capture-tutorial/data-preview.png"::: +1. Select the **Azure Data Lake Storage Gen2** tile on your canvas and configure it by specifying: * Subscription where your Azure Data Lake Gen2 account is located in - * Storage account name which should be the same ADLS Gen2 account used with your Azure Synapse Analytics workspace done in the Prerequisites section. + * Storage account name, which should be the same ADLS Gen2 account used with your Azure Synapse Analytics workspace that you created in the Prerequisites section. * Container inside which the Parquet files will be created. * Path pattern set to *{date}/{time}* * Date and time pattern as the default *yyyy-mm-dd* and *HH*. - * Click **Connect** -8. Select **Save** in the top ribbon to save your job and then select **Start**. 
Set Streaming Unit count to 3 and then Select **Start** to run your job. -[![Screenshot of start job in no code editor.](./media/stream-analytics-no-code/no-code-start-job.png)](./media/stream-analytics-no-code/no-code-start-job.png#lightbox) -9. You'll then see a list of all Stream Analytics jobs created using the no code editor. And within two minutes, your job will go to a **Running** state. -[![Screenshot of job in running state after job creation.](./media/stream-analytics-no-code/no-code-job-running-state.png)](./media/stream-analytics-no-code/no-code-job-running-state.png#lightbox) + * Select **Connect** + + :::image type="content" source="./media/event-hubs-parquet-capture-tutorial/data-lake-storage-settings.png" alt-text="Screenshot showing the configuration settings for the Data Lake Storage." lightbox="./media/event-hubs-parquet-capture-tutorial/data-lake-storage-settings.png"::: +1. Select **Save** in the top ribbon to save your job and then select **Start**. Set Streaming Unit count to 3 and then Select **Start** to run your job. + + :::image type="content" source="./media/event-hubs-parquet-capture-tutorial/start-job.png" alt-text="Screenshot showing the Start Stream Analytics Job page." lightbox="./media/event-hubs-parquet-capture-tutorial/start-job.png"::: +1. You'll then see a list of all Stream Analytics jobs created using the no code editor. And within two minutes, your job will go to a **Running** state. Select the **Refresh** button on the page to see the status changing from Created -> Starting -> Running. + + :::image type="content" source="./media/event-hubs-parquet-capture-tutorial/job-list.png" alt-text="Screenshot showing the list of Stream Analytics jobs." lightbox="./media/event-hubs-parquet-capture-tutorial/job-list.png"::: ## View output in your Azure Data Lake Storage Gen 2 account 1. Locate the Azure Data Lake Storage Gen2 account you had used in the previous step. 2. Select the container you had used in the previous step. You'll see parquet files created based on the *{date}/{time}* path pattern used in the previous step. [![Screenshot of parquet files in Azure Data Lake Storage Gen 2.](./media/stream-analytics-no-code/capture-parquet-files.png)](./media/stream-analytics-no-code/capture-parquet-files.png#lightbox) -## Query event hub Capture files in Parquet format with Azure Synapse Analytics +## Query captured data in Parquet format with Azure Synapse Analytics ### Query using Azure Synapse Spark 1. Locate your Azure Synapse Analytics workspace and open Synapse Studio. 2. [Create a serverless Apache Spark pool](../synapse-analytics/get-started-analyze-spark.md#create-a-serverless-apache-spark-pool) in your workspace if one doesn't already exist. @@ -74,12 +88,13 @@ Before you start, make sure you've completed the following steps: df.printSchema() ``` 5. Select **Run All** to see the results -[![Screenshot of spark run results in Azure Synapse Analytics.](./media/stream-analytics-no-code/spark-run-all.png)](./media/stream-analytics-no-code/spark-run-all.png#lightbox) + + :::image type="content" source="./media/event-hubs-parquet-capture-tutorial/spark-run-all.png" alt-text="Screenshot of spark run results in Azure Synapse Analytics." lightbox="./media/event-hubs-parquet-capture-tutorial/spark-run-all.png"::: ### Query using Azure Synapse Serverless SQL 1. In the **Develop** hub, create a new **SQL script**. -2. Paste the following script and **Run** it using the **Built-in** serverless SQL endpoint. 
Replace *container* and *adlsname* with the name of the container and ADLS Gen2 account used in the previous step. - ``SQL +2. Paste the following script and **Run** it using the **Built-in** serverless SQL endpoint. Replace *container* and *adlsname* with the name of the container and ADLS Gen2 account used in the previous step. + ```SQL SELECT TOP 100 * FROM @@ -88,7 +103,8 @@ Before you start, make sure you've completed the following steps: FORMAT='PARQUET' ) AS [result] ``` -[![Screenshot of SQL query results using Azure Synapse Analytics.](./media/stream-analytics-no-code/sql-results.png)](./media/stream-analytics-no-code/sql-results.png#lightbox) + + :::image type="content" source="./media/event-hubs-parquet-capture-tutorial/sql-results.png" alt-text="Screenshot of SQL script results in Azure Synapse Analytics." lightbox="./media/event-hubs-parquet-capture-tutorial/sql-results.png"::: ## Clean up resources 1. Locate your Event Hubs instance and see the list of Stream Analytics jobs under **Process Data** section. Stop any jobs that are running. diff --git a/articles/stream-analytics/filter-ingest-data-lake-storage-gen2.md b/articles/stream-analytics/filter-ingest-data-lake-storage-gen2.md index 90a910f03620..3776e18a888a 100644 --- a/articles/stream-analytics/filter-ingest-data-lake-storage-gen2.md +++ b/articles/stream-analytics/filter-ingest-data-lake-storage-gen2.md @@ -6,7 +6,7 @@ ms.author: sidram ms.service: stream-analytics ms.topic: how-to ms.custom: mvc, event-tier1-build-2022 -ms.date: 05/08/2022 +ms.date: 05/24/2022 --- # Filter and ingest to Azure Data Lake Storage Gen2 using the Stream Analytics no code editor @@ -32,9 +32,9 @@ This article describes how you can use the no code editor to easily create a Str :::image type="content" source="./media/filter-ingest-data-lake-storage-gen2/add-field.png" alt-text="Screenshot showing where you can add a field or remove, rename, or change a field type." lightbox="./media/filter-ingest-data-lake-storage-gen2/add-field.png" ::: 1. A live sample of incoming data in **Data preview** table under the diagram view. It automatically refreshes periodically. You can select **Pause streaming preview** to see a static view of sample input data. :::image type="content" source="./media/filter-ingest-data-lake-storage-gen2/sample-input.png" alt-text="Screenshot showing sample data on the Data preview tab." lightbox="./media/filter-ingest-data-lake-storage-gen2/sample-input.png" ::: -1. In the **Filter** area, select a field to filter the incoming data with a condition. +1. Select the **Filter** tile. In the **Filter** area, select a field to filter the incoming data with a condition. :::image type="content" source="./media/filter-ingest-data-lake-storage-gen2/filter-data.png" alt-text="Screenshot showing the Filter area where you can add a conditional filter." lightbox="./media/filter-ingest-data-lake-storage-gen2/filter-data.png" ::: -1. Select the Azure Data Lake Gen2 table to send your filtered data: +1. Select the **Azure Data Lake Storage Gen2** tile. Select the **Azure Data Lake Gen2** account to send your filtered data: 1. Select the **subscription**, **storage account name**, and **container** from the drop-down menu. 1. After the **subscription** is selected, the **authentication method** and **storage account key** should be automatically filled in. Select **Connect**. 
For more information about the fields and to see examples of path pattern, see [Blob storage and Azure Data Lake Gen2 output from Azure Stream Analytics](blob-storage-azure-data-lake-gen2-output.md). @@ -47,10 +47,29 @@ This article describes how you can use the no code editor to easily create a Str 1. After your select **Start**, the job starts running within two minutes. :::image type="content" source="./media/filter-ingest-data-lake-storage-gen2/no-code-start-job.png" alt-text="Screenshot showing the Start Stream Analytics job window." lightbox="./media/filter-ingest-data-lake-storage-gen2/no-code-start-job.png" ::: -You can see the job under the Process Data section in the **Stream Analytics jobs** tab. Select **Open metrics** to monitor it or stop and restart it, as needed. +You can see the job under the Process Data section in the **Stream Analytics jobs** tab. Select **Refresh** until you see the job status as **Running**. Select **Open metrics** to monitor it or stop and restart it, as needed. :::image type="content" source="./media/filter-ingest-data-lake-storage-gen2/no-code-list-jobs.png" alt-text="Screenshot showing the Stream Analytics jobs tab." lightbox="./media/filter-ingest-data-lake-storage-gen2/no-code-list-jobs.png" ::: +Here's a sample **Metrics** page: + +:::image type="content" source="./media/filter-ingest-data-lake-storage-gen2/metrics-page.png" alt-text="Screenshot showing the Metrics page." lightbox="./media/filter-ingest-data-lake-storage-gen2/metrics-page.png" ::: + + +## Verify data in Data Lake Storage + +1. You should see files created in the container you specified. + + :::image type="content" source="./media/filter-ingest-data-lake-storage-gen2/filtered-data-file.png" alt-text="Screenshot showing the generated file with filtered data in the Azure Data Lake Storage." lightbox="./media/filter-ingest-data-lake-storage-gen2/filtered-data-file.png" ::: +1. Download and open the file to confirm that you see only the filtered data. In the following example, you see data with **SwitchNum** set to **US**. 
+ + ```json + {"RecordType":"MO","SystemIdentity":"d0","FileNum":"548","SwitchNum":"US","CallingNum":"345697969","CallingIMSI":"466921402416657","CalledNum":"012332886","CalledIMSI":"466923101048691","DateS":"20220524","TimeType":0,"CallPeriod":0,"ServiceType":"S","Transfer":0,"OutgoingTrunk":"419","MSRN":"1416960750071","callrecTime":"2022-05-25T02:07:10Z","EventProcessedUtcTime":"2022-05-25T02:07:50.5478116Z","PartitionId":0,"EventEnqueuedUtcTime":"2022-05-25T02:07:09.5140000Z", "TimeS":null,"CallingCellID":null,"CalledCellID":null,"IncomingTrunk":null,"CalledNum2":null,"FCIFlag":null} + {"RecordType":"MO","SystemIdentity":"d0","FileNum":"552","SwitchNum":"US","CallingNum":"012351287","CallingIMSI":"262021390056324","CalledNum":"012301973","CalledIMSI":"466922202613463","DateS":"20220524","TimeType":3,"CallPeriod":0,"ServiceType":"V","Transfer":0,"OutgoingTrunk":"442","MSRN":"886932428242","callrecTime":"2022-05-25T02:07:13Z","EventProcessedUtcTime":"2022-05-25T02:07:50.5478116Z","PartitionId":0,"EventEnqueuedUtcTime":"2022-05-25T02:07:12.7350000Z", "TimeS":null,"CallingCellID":null,"CalledCellID":null,"IncomingTrunk":null,"CalledNum2":null,"FCIFlag":null} + {"RecordType":"MO","SystemIdentity":"d0","FileNum":"559","SwitchNum":"US","CallingNum":"456757102","CallingIMSI":"466920401237309","CalledNum":"345617823","CalledIMSI":"466923000886460","DateS":"20220524","TimeType":1,"CallPeriod":696,"ServiceType":"V","Transfer":1,"OutgoingTrunk":"419","MSRN":"886932429155","callrecTime":"2022-05-25T02:07:22Z","EventProcessedUtcTime":"2022-05-25T02:07:50.5478116Z","PartitionId":0,"EventEnqueuedUtcTime":"2022-05-25T02:07:21.9190000Z", "TimeS":null,"CallingCellID":null,"CalledCellID":null,"IncomingTrunk":null,"CalledNum2":null,"FCIFlag":null} + ``` + + ## Next steps Learn more about Azure Stream Analytics and how to monitor the job you've created. 
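As a supplement to the verification step above, the following is a minimal sketch that checks a downloaded output file from the command line instead of eyeballing it. It assumes the file is newline-delimited JSON, as in the sample records shown earlier; the local file name is a hypothetical placeholder.

```python
# Minimal sketch: confirm that every record in a downloaded output file matches the filter.
# Assumes one JSON object per line (as in the sample above); the file name is hypothetical.
import json

def count_matches(path: str, field: str = "SwitchNum", expected: str = "US") -> None:
    total = matches = 0
    with open(path, encoding="utf-8") as f:
        for line in f:
            line = line.strip()
            if not line:
                continue
            record = json.loads(line)
            total += 1
            if record.get(field) == expected:
                matches += 1
    print(f"{matches}/{total} records have {field} == {expected!r}")

count_matches("filtered-output.json")  # hypothetical local copy of the downloaded blob
```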
diff --git a/articles/stream-analytics/media/capture-event-hub-data-parquet/job-metrics.png b/articles/stream-analytics/media/capture-event-hub-data-parquet/job-metrics.png new file mode 100644 index 000000000000..a06e59411150 Binary files /dev/null and b/articles/stream-analytics/media/capture-event-hub-data-parquet/job-metrics.png differ diff --git a/articles/stream-analytics/media/capture-event-hub-data-parquet/verify-captured-data.png b/articles/stream-analytics/media/capture-event-hub-data-parquet/verify-captured-data.png new file mode 100644 index 000000000000..a792f7fa5c64 Binary files /dev/null and b/articles/stream-analytics/media/capture-event-hub-data-parquet/verify-captured-data.png differ diff --git a/articles/stream-analytics/media/event-hubs-parquet-capture-tutorial/data-lake-storage-settings.png b/articles/stream-analytics/media/event-hubs-parquet-capture-tutorial/data-lake-storage-settings.png new file mode 100644 index 000000000000..980c1df0f919 Binary files /dev/null and b/articles/stream-analytics/media/event-hubs-parquet-capture-tutorial/data-lake-storage-settings.png differ diff --git a/articles/stream-analytics/media/event-hubs-parquet-capture-tutorial/data-preview.png b/articles/stream-analytics/media/event-hubs-parquet-capture-tutorial/data-preview.png new file mode 100644 index 000000000000..4f1f8328bd15 Binary files /dev/null and b/articles/stream-analytics/media/event-hubs-parquet-capture-tutorial/data-preview.png differ diff --git a/articles/stream-analytics/media/event-hubs-parquet-capture-tutorial/event-hub-configuration.png b/articles/stream-analytics/media/event-hubs-parquet-capture-tutorial/event-hub-configuration.png new file mode 100644 index 000000000000..32dcff68832d Binary files /dev/null and b/articles/stream-analytics/media/event-hubs-parquet-capture-tutorial/event-hub-configuration.png differ diff --git a/articles/stream-analytics/media/event-hubs-parquet-capture-tutorial/job-list.png b/articles/stream-analytics/media/event-hubs-parquet-capture-tutorial/job-list.png new file mode 100644 index 000000000000..82bcf91ff8a7 Binary files /dev/null and b/articles/stream-analytics/media/event-hubs-parquet-capture-tutorial/job-list.png differ diff --git a/articles/stream-analytics/media/event-hubs-parquet-capture-tutorial/spark-run-all.png b/articles/stream-analytics/media/event-hubs-parquet-capture-tutorial/spark-run-all.png new file mode 100644 index 000000000000..88460c872e15 Binary files /dev/null and b/articles/stream-analytics/media/event-hubs-parquet-capture-tutorial/spark-run-all.png differ diff --git a/articles/stream-analytics/media/event-hubs-parquet-capture-tutorial/sql-results.png b/articles/stream-analytics/media/event-hubs-parquet-capture-tutorial/sql-results.png new file mode 100644 index 000000000000..5e599a43b598 Binary files /dev/null and b/articles/stream-analytics/media/event-hubs-parquet-capture-tutorial/sql-results.png differ diff --git a/articles/stream-analytics/media/event-hubs-parquet-capture-tutorial/start-job.png b/articles/stream-analytics/media/event-hubs-parquet-capture-tutorial/start-job.png new file mode 100644 index 000000000000..3c691fd3ecc1 Binary files /dev/null and b/articles/stream-analytics/media/event-hubs-parquet-capture-tutorial/start-job.png differ diff --git a/articles/stream-analytics/media/filter-ingest-data-lake-storage-gen2/add-field.png b/articles/stream-analytics/media/filter-ingest-data-lake-storage-gen2/add-field.png index 3ce3e26077ad..0fd8ad03cc7d 100644 Binary files 
a/articles/stream-analytics/media/filter-ingest-data-lake-storage-gen2/add-field.png and b/articles/stream-analytics/media/filter-ingest-data-lake-storage-gen2/add-field.png differ diff --git a/articles/stream-analytics/media/filter-ingest-data-lake-storage-gen2/create-job.png b/articles/stream-analytics/media/filter-ingest-data-lake-storage-gen2/create-job.png index ea0d0f603985..b19b8e82936e 100644 Binary files a/articles/stream-analytics/media/filter-ingest-data-lake-storage-gen2/create-job.png and b/articles/stream-analytics/media/filter-ingest-data-lake-storage-gen2/create-job.png differ diff --git a/articles/stream-analytics/media/filter-ingest-data-lake-storage-gen2/data-lake-configuration.png b/articles/stream-analytics/media/filter-ingest-data-lake-storage-gen2/data-lake-configuration.png index bdef08cfe4db..2dfc0ec83882 100644 Binary files a/articles/stream-analytics/media/filter-ingest-data-lake-storage-gen2/data-lake-configuration.png and b/articles/stream-analytics/media/filter-ingest-data-lake-storage-gen2/data-lake-configuration.png differ diff --git a/articles/stream-analytics/media/filter-ingest-data-lake-storage-gen2/event-hub-review-connect.png b/articles/stream-analytics/media/filter-ingest-data-lake-storage-gen2/event-hub-review-connect.png index ff11425a56b2..ee2f61ea5b03 100644 Binary files a/articles/stream-analytics/media/filter-ingest-data-lake-storage-gen2/event-hub-review-connect.png and b/articles/stream-analytics/media/filter-ingest-data-lake-storage-gen2/event-hub-review-connect.png differ diff --git a/articles/stream-analytics/media/filter-ingest-data-lake-storage-gen2/filter-data-lake-gen2-card-start.png b/articles/stream-analytics/media/filter-ingest-data-lake-storage-gen2/filter-data-lake-gen2-card-start.png index 85ad98232961..9a31ccb69918 100644 Binary files a/articles/stream-analytics/media/filter-ingest-data-lake-storage-gen2/filter-data-lake-gen2-card-start.png and b/articles/stream-analytics/media/filter-ingest-data-lake-storage-gen2/filter-data-lake-gen2-card-start.png differ diff --git a/articles/stream-analytics/media/filter-ingest-data-lake-storage-gen2/filter-data.png b/articles/stream-analytics/media/filter-ingest-data-lake-storage-gen2/filter-data.png index 8f4295df04f1..5d6d2443581b 100644 Binary files a/articles/stream-analytics/media/filter-ingest-data-lake-storage-gen2/filter-data.png and b/articles/stream-analytics/media/filter-ingest-data-lake-storage-gen2/filter-data.png differ diff --git a/articles/stream-analytics/media/filter-ingest-data-lake-storage-gen2/filtered-data-file.png b/articles/stream-analytics/media/filter-ingest-data-lake-storage-gen2/filtered-data-file.png new file mode 100644 index 000000000000..2a7cc46e9d83 Binary files /dev/null and b/articles/stream-analytics/media/filter-ingest-data-lake-storage-gen2/filtered-data-file.png differ diff --git a/articles/stream-analytics/media/filter-ingest-data-lake-storage-gen2/metrics-page.png b/articles/stream-analytics/media/filter-ingest-data-lake-storage-gen2/metrics-page.png new file mode 100644 index 000000000000..e8aac3b95832 Binary files /dev/null and b/articles/stream-analytics/media/filter-ingest-data-lake-storage-gen2/metrics-page.png differ diff --git a/articles/stream-analytics/media/filter-ingest-data-lake-storage-gen2/no-code-list-jobs.png b/articles/stream-analytics/media/filter-ingest-data-lake-storage-gen2/no-code-list-jobs.png index 69c0424f984b..f4175d854169 100644 Binary files 
a/articles/stream-analytics/media/filter-ingest-data-lake-storage-gen2/no-code-list-jobs.png and b/articles/stream-analytics/media/filter-ingest-data-lake-storage-gen2/no-code-list-jobs.png differ diff --git a/articles/stream-analytics/media/filter-ingest-data-lake-storage-gen2/no-code-save-start.png b/articles/stream-analytics/media/filter-ingest-data-lake-storage-gen2/no-code-save-start.png index b7aa61f65813..7050f7e8fdd0 100644 Binary files a/articles/stream-analytics/media/filter-ingest-data-lake-storage-gen2/no-code-save-start.png and b/articles/stream-analytics/media/filter-ingest-data-lake-storage-gen2/no-code-save-start.png differ diff --git a/articles/stream-analytics/media/filter-ingest-data-lake-storage-gen2/no-code-start-job.png b/articles/stream-analytics/media/filter-ingest-data-lake-storage-gen2/no-code-start-job.png index f897fc399a6a..58c0fd59bae6 100644 Binary files a/articles/stream-analytics/media/filter-ingest-data-lake-storage-gen2/no-code-start-job.png and b/articles/stream-analytics/media/filter-ingest-data-lake-storage-gen2/no-code-start-job.png differ diff --git a/articles/stream-analytics/media/filter-ingest-data-lake-storage-gen2/sample-input.png b/articles/stream-analytics/media/filter-ingest-data-lake-storage-gen2/sample-input.png index e841001f6a7b..08ae35445d90 100644 Binary files a/articles/stream-analytics/media/filter-ingest-data-lake-storage-gen2/sample-input.png and b/articles/stream-analytics/media/filter-ingest-data-lake-storage-gen2/sample-input.png differ diff --git a/articles/stream-analytics/media/stream-analytics-no-code/add-make-field.png b/articles/stream-analytics/media/stream-analytics-no-code/add-make-field.png new file mode 100644 index 000000000000..884ad7009b8a Binary files /dev/null and b/articles/stream-analytics/media/stream-analytics-no-code/add-make-field.png differ diff --git a/articles/stream-analytics/media/stream-analytics-no-code/connect-group.png b/articles/stream-analytics/media/stream-analytics-no-code/connect-group.png new file mode 100644 index 000000000000..d8dbee6b7611 Binary files /dev/null and b/articles/stream-analytics/media/stream-analytics-no-code/connect-group.png differ diff --git a/articles/stream-analytics/media/stream-analytics-no-code/data-preview-fields.png b/articles/stream-analytics/media/stream-analytics-no-code/data-preview-fields.png new file mode 100644 index 000000000000..eedd7397c085 Binary files /dev/null and b/articles/stream-analytics/media/stream-analytics-no-code/data-preview-fields.png differ diff --git a/articles/stream-analytics/media/stream-analytics-no-code/event-hub-configuration.png b/articles/stream-analytics/media/stream-analytics-no-code/event-hub-configuration.png new file mode 100644 index 000000000000..f142d8fb78f9 Binary files /dev/null and b/articles/stream-analytics/media/stream-analytics-no-code/event-hub-configuration.png differ diff --git a/articles/stream-analytics/media/stream-analytics-no-code/group-settings.png b/articles/stream-analytics/media/stream-analytics-no-code/group-settings.png new file mode 100644 index 000000000000..c0b0311135cd Binary files /dev/null and b/articles/stream-analytics/media/stream-analytics-no-code/group-settings.png differ diff --git a/articles/stream-analytics/media/stream-analytics-no-code/job-list.png b/articles/stream-analytics/media/stream-analytics-no-code/job-list.png new file mode 100644 index 000000000000..4c5e1a3ae5c9 Binary files /dev/null and b/articles/stream-analytics/media/stream-analytics-no-code/job-list.png differ diff --git 
a/articles/stream-analytics/media/stream-analytics-no-code/job-name.png b/articles/stream-analytics/media/stream-analytics-no-code/job-name.png new file mode 100644 index 000000000000..655b0eb94dbd Binary files /dev/null and b/articles/stream-analytics/media/stream-analytics-no-code/job-name.png differ diff --git a/articles/stream-analytics/media/stream-analytics-no-code/manage-fields-page.png b/articles/stream-analytics/media/stream-analytics-no-code/manage-fields-page.png new file mode 100644 index 000000000000..81f8cb348821 Binary files /dev/null and b/articles/stream-analytics/media/stream-analytics-no-code/manage-fields-page.png differ diff --git a/articles/stream-analytics/media/stream-analytics-no-code/manage-fields.png b/articles/stream-analytics/media/stream-analytics-no-code/manage-fields.png new file mode 100644 index 000000000000..beab5eca313c Binary files /dev/null and b/articles/stream-analytics/media/stream-analytics-no-code/manage-fields.png differ diff --git a/articles/stream-analytics/media/stream-analytics-no-code/new-stream-analytics-job.png b/articles/stream-analytics/media/stream-analytics-no-code/new-stream-analytics-job.png new file mode 100644 index 000000000000..ad6729f42f67 Binary files /dev/null and b/articles/stream-analytics/media/stream-analytics-no-code/new-stream-analytics-job.png differ diff --git a/articles/stream-analytics/media/stream-analytics-no-code/parquet-capture-start.png b/articles/stream-analytics/media/stream-analytics-no-code/parquet-capture-start.png index 2796451ee272..11a8b3532c90 100644 Binary files a/articles/stream-analytics/media/stream-analytics-no-code/parquet-capture-start.png and b/articles/stream-analytics/media/stream-analytics-no-code/parquet-capture-start.png differ diff --git a/articles/stream-analytics/media/stream-analytics-no-code/real-time-dashboard-power-bi.png b/articles/stream-analytics/media/stream-analytics-no-code/real-time-dashboard-power-bi.png index 8694696f688f..734f700a32eb 100644 Binary files a/articles/stream-analytics/media/stream-analytics-no-code/real-time-dashboard-power-bi.png and b/articles/stream-analytics/media/stream-analytics-no-code/real-time-dashboard-power-bi.png differ diff --git a/articles/stream-analytics/media/stream-analytics-no-code/rename-fields.png b/articles/stream-analytics/media/stream-analytics-no-code/rename-fields.png new file mode 100644 index 000000000000..cbaf60d54894 Binary files /dev/null and b/articles/stream-analytics/media/stream-analytics-no-code/rename-fields.png differ diff --git a/articles/stream-analytics/media/stream-analytics-no-code/select-event-hub.png b/articles/stream-analytics/media/stream-analytics-no-code/select-event-hub.png new file mode 100644 index 000000000000..1b9563c89f4c Binary files /dev/null and b/articles/stream-analytics/media/stream-analytics-no-code/select-event-hub.png differ diff --git a/articles/stream-analytics/media/stream-analytics-no-code/start-analytics-job.png b/articles/stream-analytics/media/stream-analytics-no-code/start-analytics-job.png new file mode 100644 index 000000000000..339711b3430d Binary files /dev/null and b/articles/stream-analytics/media/stream-analytics-no-code/start-analytics-job.png differ diff --git a/articles/stream-analytics/media/stream-analytics-no-code/start-blank-canvas.png b/articles/stream-analytics/media/stream-analytics-no-code/start-blank-canvas.png new file mode 100644 index 000000000000..91006cd3aec6 Binary files /dev/null and b/articles/stream-analytics/media/stream-analytics-no-code/start-blank-canvas.png 
differ diff --git a/articles/stream-analytics/media/stream-analytics-no-code/synapse-settings.png b/articles/stream-analytics/media/stream-analytics-no-code/synapse-settings.png new file mode 100644 index 000000000000..4b6a62381101 Binary files /dev/null and b/articles/stream-analytics/media/stream-analytics-no-code/synapse-settings.png differ diff --git a/articles/stream-analytics/no-code-power-bi-tutorial.md b/articles/stream-analytics/no-code-power-bi-tutorial.md index c52030a336fa..372f02d054f9 100644 --- a/articles/stream-analytics/no-code-power-bi-tutorial.md +++ b/articles/stream-analytics/no-code-power-bi-tutorial.md @@ -5,7 +5,7 @@ author: sidramadoss ms.author: sidram ms.service: stream-analytics ms.topic: how-to -ms.date: 05/23/2022 +ms.date: 05/25/2022 ms.custom: seodec18 --- @@ -28,9 +28,9 @@ In this tutorial, you learn how to: Before you start, make sure you've completed the following steps: * If you don't have an Azure subscription, create a [free account](https://azure.microsoft.com/free/). -* Deploy the TollApp event generator to Azure, use this link to [Deploy TollApp Azure Template](https://portal.azure.com/#create/Microsoft.Template/uri/https%3A%2F%2Fraw.githubusercontent.com%2FAzure%2Fazure-stream-analytics%2Fmaster%2FSamples%2FTollApp%2FVSProjects%2FTollAppDeployment%2Fazuredeploy.json). Set the 'interval' parameter to 1. And use a new resource group for this. +* Deploy the TollApp event generator to Azure, use this link to [Deploy TollApp Azure Template](https://portal.azure.com/#create/Microsoft.Template/uri/https%3A%2F%2Fraw.githubusercontent.com%2FAzure%2Fazure-stream-analytics%2Fmaster%2FSamples%2FTollApp%2FVSProjects%2FTollAppDeployment%2Fazuredeploy.json). Set the 'interval' parameter to 1. And use a new resource group for this step. * Create an [Azure Synapse Analytics workspace](../synapse-analytics/get-started-create-workspace.md) with a [Dedicated SQL pool](../synapse-analytics/get-started-analyze-sql-pool.md#create-a-dedicated-sql-pool). -* Create a table named **carsummary** using your Dedicated SQL pool. You can do this by running the following SQL script: +* Create a table named **carsummary** using your Dedicated SQL pool. You can do it by running the following SQL script: ```SQL CREATE TABLE carsummary ( @@ -42,36 +42,65 @@ Before you start, make sure you've completed the following steps: ``` ## Use no code editor to create a Stream Analytics job 1. Locate the Resource Group in which the TollApp event generator was deployed. -2. Select the Azure Event Hubs namespace. And then under the Event Hubs section, select **entrystream** instance. -3. Go to **Process data** under Features section and then click **start** on the **Start with blank canvas** template. -[![Screenshot of real time dashboard template in no code editor.](./media/stream-analytics-no-code/real-time-dashboard-power-bi.png)](./media/stream-analytics-no-code/real-time-dashboard-power-bi.png#lightbox) -4. Name your job **carsummary** and select **Create**. -5. Configure your event hub input by specifying - * Consumer Group: Default - * Serialization type of your input data: JSON - * Authentication mode which the job will use to connect to your event hub: Connection String defaults - * Click **Connect** -6. Within few seconds, you'll see sample input data and the schema. You can choose to drop fields, rename fields or change data type if you want. -7. Click the **Group by** tile on the canvas and connect it to the event hub tile. 
Configure the Group By tile by specifying: - * Aggregation as **Count** - * Field as **Make** which is a nested field inside **CarModel** - * Click **Save** - * In the **Group by** settings, select **Make** and **Tumbling window** of **3 minutes** -8. Click the **Manage Fields** tile and connect it to the Group by tile on canvas. Configure the **Manage Fields** tile by specifying: - * Clicking on **Add all fields** - * Rename the fields by clicking on the fields and changing the names from: - * COUNT_make to CarCount - * Window_End_Time to times -9. Click the **Azure Synapse Analytics** tile and connect it to Manage Fields tile on your canvas. Configure Azure Synapse Analytics by specifying: +2. Select the Azure Event Hubs **namespace**. +1. On the **Event Hubs Namespace** page, select **Event Hubs** under **Entities** on the left menu. +1. Select **entrystream** instance. + + :::image type="content" source="./media/stream-analytics-no-code/select-event-hub.png" alt-text="Screenshot showing the selection of the event hub." lightbox="./media/stream-analytics-no-code/select-event-hub.png"::: +1. Go to **Process data** under Features section and then select **start** on the **Start with blank canvas** template. + + :::image type="content" source="./media/stream-analytics-no-code/start-blank-canvas.png" alt-text="Screenshot showing the selection of the Start button on the Start with a blank canvas tile." lightbox="./media/stream-analytics-no-code/start-blank-canvas.png"::: +1. Name your job **carsummary** and select **Create**. + + :::image type="content" source="./media/stream-analytics-no-code/job-name.png" alt-text="Screenshot of the New Stream Analytics job page." lightbox="./media/stream-analytics-no-code/job-name.png"::: +1. On the **event hub** configuration page, confirm the following settings, and then select **Connect**. + - *Consumer Group*: Default + - *Serialization type* of your input data: JSON + - *Authentication mode* that the job will use to connect to your event hub: Connection string. + + :::image type="content" source="./media/stream-analytics-no-code/event-hub-configuration.png" alt-text="Screenshot of the configuration page for your event hub." lightbox="./media/stream-analytics-no-code/event-hub-configuration.png"::: +1. Within few seconds, you'll see sample input data and the schema. You can choose to drop fields, rename fields or change data type if you want. + + :::image type="content" source="./media/stream-analytics-no-code/data-preview-fields.png" alt-text="Screenshot showing the preview of data in the event hub and the fields." lightbox="./media/stream-analytics-no-code/data-preview-fields.png"::: +1. Select the **Group by** tile on the canvas and connect it to the event hub tile. + + :::image type="content" source="./media/stream-analytics-no-code/connect-group.png" alt-text="Screenshot showing the Group tile connected to the Event Hubs tile." lightbox="./media/stream-analytics-no-code/connect-group.png"::: +1. Configure the **Group by** tile by specifying: + 1. Aggregation as **Count**. + 1. Field as **Make** which is a nested field inside **CarModel**. + 1. Select **Save**. + 1. In the **Group by** settings, select **Make** and **Tumbling window** of **3 minutes** + + :::image type="content" source="./media/stream-analytics-no-code/group-settings.png" alt-text="Screenshot of the Group by configuration page." lightbox="./media/stream-analytics-no-code/group-settings.png"::: +1. 
Select **Add field** on the **Manage fields** page, and add the **Make** field as shown in the following image, and then select **Save**. + + :::image type="content" source="./media/stream-analytics-no-code/add-make-field.png" alt-text="Screenshot showing the addition of the Make field." lightbox="./media/stream-analytics-no-code/add-make-field.png"::: +1. Select **Manage fields** on the command bar. Connect the **Manage Fields** tile to the **Group by tile** on canvas. Select **Add all fields** on the **Manage fields** configuration page. + + :::image type="content" source="./media/stream-analytics-no-code/manage-fields.png" alt-text="Screenshot of the Manage fields page." lightbox="./media/stream-analytics-no-code/manage-fields.png"::: +1. Select **...** next to the fields, and select **Edit** to rename them. + - **COUNT_make** to **CarCount** + - **Window_End_Time** to **times** + + :::image type="content" source="./media/stream-analytics-no-code/rename-fields.png" alt-text="Screenshot of the Manage fields page with the fields renamed." lightbox="./media/stream-analytics-no-code/rename-fields.png"::: +1. The **Manage fields** page should look as shown in the following image. + + :::image type="content" source="./media/stream-analytics-no-code/manage-fields-page.png" alt-text="Screenshot of the Manage fields page with three fields." lightbox="./media/stream-analytics-no-code/manage-fields-page.png"::: +1. Select **Synapse** on the command bar. Connect the **Synapse** tile to the **Manage fields** tile on your canvas. +1. Configure Azure Synapse Analytics by specifying: * Subscription where your Azure Synapse Analytics is located - * Database of the Dedicated SQL pool which you used to create the Table in the previous section. + * Database of the Dedicated SQL pool that you used to create the **carsummary** table in the previous section. * Username and password to authenticate * Table name as **carsummary** - * Click **Connect**. You'll see sample results that will be written to your Synapse SQL table. - [![Screenshot of synapse output in no code editor.](./media/stream-analytics-no-code/synapse-output.png)](./media/stream-analytics-no-code/synapse-output.png#lightbox) -8. Select **Save** in the top ribbon to save your job and then select **Start**. Set Streaming Unit count to 3 and then click **Start** to run your job. Specify the storage account that will be used by Synapse SQL to load data into your data warehouse. -9. You'll then see a list of all Stream Analytics jobs created using the no code editor. And within two minutes, your job will go to a **Running** state. -[![Screenshot of job in running state in no code editor.](./media/stream-analytics-no-code/cosmos-db-running-state.png)](./media/stream-analytics-no-code/cosmos-db-running-state.png#lightbox) + * Select **Connect**. You'll see sample results that will be written to your Synapse SQL table. + + :::image type="content" source="./media/stream-analytics-no-code/synapse-settings.png" alt-text="Screenshot of the Synapse tile settings." lightbox="./media/stream-analytics-no-code/synapse-settings.png"::: +1. Select **Save** in the top ribbon to save your job and then select **Start**. Set Streaming Unit count to 3 and then select **Start** to run your job. Specify the storage account that will be used by Synapse SQL to load data into your data warehouse. + + :::image type="content" source="./media/stream-analytics-no-code/start-analytics-job.png" alt-text="Screenshot of the Start Stream Analytics Job page." 
lightbox="./media/stream-analytics-no-code/start-analytics-job.png"::: +1. You'll then see a list of all Stream Analytics jobs created using the no code editor. And within two minutes, your job will go to a **Running** state. Select the **Refresh** button on the page to see the status changing from Created -> Starting -> Running. + + :::image type="content" source="./media/stream-analytics-no-code/job-list.png" alt-text="Screenshot showing the list of jobs." lightbox="./media/stream-analytics-no-code/job-list.png"::: ## Create a Power BI visualization 1. Download the latest version of [Power BI desktop](https://powerbi.microsoft.com/desktop). diff --git a/articles/synapse-analytics/backuprestore/sqlpool-create-restore-point.md b/articles/synapse-analytics/backuprestore/sqlpool-create-restore-point.md index 80b646babcc5..b13f109c1e7a 100644 --- a/articles/synapse-analytics/backuprestore/sqlpool-create-restore-point.md +++ b/articles/synapse-analytics/backuprestore/sqlpool-create-restore-point.md @@ -2,13 +2,13 @@ title: Create a user defined restore point for a dedicated SQL pool description: Learn how to use the Azure portal to create a user-defined restore point for dedicated SQL pool in Azure Synapse Analytics. author: joannapea -manager: igorstan +manager: ms.service: synapse-analytics ms.topic: how-to ms.subservice: sql ms.date: 10/29/2020 ms.author: joanpo -ms.reviewer: igorstan +ms.reviewer: wiassaf ms.custom: seo-lt-2019 --- # User-defined restore points diff --git a/articles/synapse-analytics/catalog-and-governance/how-to-discover-connect-analyze-azure-purview.md b/articles/synapse-analytics/catalog-and-governance/how-to-discover-connect-analyze-azure-purview.md index be18c61dfd3e..c242ee70fa5c 100644 --- a/articles/synapse-analytics/catalog-and-governance/how-to-discover-connect-analyze-azure-purview.md +++ b/articles/synapse-analytics/catalog-and-governance/how-to-discover-connect-analyze-azure-purview.md @@ -7,7 +7,7 @@ ms.subservice: purview ms.topic: how-to ms.date: 12/16/2020 ms.author: jejiang -ms.reviewer: jrasnick +ms.reviewer: wiassaf --- # Discover, connect, and explore data in Synapse using Microsoft Purview diff --git a/articles/synapse-analytics/guidance/security-white-paper-introduction.md b/articles/synapse-analytics/guidance/security-white-paper-introduction.md index 909ba27cc6f6..d2996ecc7e53 100644 --- a/articles/synapse-analytics/guidance/security-white-paper-introduction.md +++ b/articles/synapse-analytics/guidance/security-white-paper-introduction.md @@ -65,7 +65,7 @@ Azure Synapse is a Platform-as-a-service (PaaS) analytics service that brings to [Pipelines](../../data-factory/concepts-pipelines-activities.md) are a logical grouping of activities that perform data movement and data transformation at scale. [Data flow](../../data-factory/concepts-data-flow-overview.md) is a transformation activity in a pipeline that's developed by using a low-code user interface. It can execute data transformations at scale. Behind the scenes, data flows use Apache Spark clusters of Azure Synapse to execute automatically generated code. Pipelines and data flows are compute-only services, and they don't have any managed storage associated with them. -Pipelines use the Integration Runtime (IR) as the scalable compute infrastructure for performing data movement and dispatch activities. 
Data movement activities run on the IR whereas the dispatch activities run on variety of other compute engines, including Azure SQL Database, Azure HDInsight, Azure Databricks, Apache Spark clusters of Azure Synapse, and others. Azure Synapse supports two types of IR: Azure Integration Runtime and Self-hosted Integration Runtime. The [Azure IR](/azure/data-factory/concepts-integration-runtime.md#azure-integration-runtime) provides a fully managed, scalable, and on-demand compute infrastructure. The [Self-hosted IR](/azure/data-factory/concepts-integration-runtime.md#self-hosted-integration-runtime) is installed and configured by the customer in their own network, either in on-premises machines or in Azure cloud virtual machines. +Pipelines use the Integration Runtime (IR) as the scalable compute infrastructure for performing data movement and dispatch activities. Data movement activities run on the IR whereas the dispatch activities run on variety of other compute engines, including Azure SQL Database, Azure HDInsight, Azure Databricks, Apache Spark clusters of Azure Synapse, and others. Azure Synapse supports two types of IR: Azure Integration Runtime and Self-hosted Integration Runtime. The [Azure IR](../../data-factory/concepts-integration-runtime.md#azure-integration-runtime) provides a fully managed, scalable, and on-demand compute infrastructure. The [Self-hosted IR](../../data-factory/concepts-integration-runtime.md#self-hosted-integration-runtime) is installed and configured by the customer in their own network, either in on-premises machines or in Azure cloud virtual machines. Customers can choose to associate their Synapse workspace with a [managed workspace virtual network](../security/synapse-workspace-managed-vnet.md). When associated with a managed workspace virtual network, Azure IRs and Apache Spark clusters that are used by pipelines, data flows, and the Apache Spark pools are deployed inside the managed workspace virtual network. This setup ensures network isolation between the workspaces for pipelines and Apache Spark workloads. @@ -91,4 +91,4 @@ Azure Synapse implements a multi-layered security architecture for end-to-end pr ## Next steps -In the [next article](security-white-paper-data-protection.md) in this white paper series, learn about data protection. +In the [next article](security-white-paper-data-protection.md) in this white paper series, learn about data protection. \ No newline at end of file diff --git a/articles/synapse-analytics/migration-guides/index.yml b/articles/synapse-analytics/migration-guides/index.yml new file mode 100644 index 000000000000..bfda95764d15 --- /dev/null +++ b/articles/synapse-analytics/migration-guides/index.yml @@ -0,0 +1,89 @@ +### YamlMime:Landing + +title: Azure Synapse Analytics migration guides # < 60 chars +summary: Azure Synapse migration guides tell the story of bringing existing enterprise analytics solutions to the limitless analytics in Azure Synapse. # < 160 chars +metadata: + title: Azure Synapse Analytics Migration Guides # Required; page title displayed in search results. Include the brand. < 60 chars. + description: Azure Synapse migration guides tell the story of bringing existing enterprise analytics solutions to the limitless analytics in Azure Synapse. # Required; article description that is displayed in search results. < 160 chars. + ms.service: synapse-analytics #Required; service per approved list. service slug assigned to your service by ACOM. 
+ ms.subservice: overview + ms.topic: landing-page # Required + ms.collection: collection + author: WilliamDAssafMSFT #Required; your GitHub user alias, with correct capitalization. + ms.author: wiassaf #Required; microsoft alias of author; optional team alias. + ms.date: 05/24/2022 #Required; mm/dd/yyyy format. + +# linkListType: architecture | concept | deploy | download | get-started | how-to-guide | learn | overview | quickstart | reference | tutorial | video | whats-new + +landingContent: +# Cards and links should be based on top customer tasks or top subjects +# Start card title with a verb + # Card (optional) + - title: Overview + linkLists: + - linkListType: overview + links: + - text: What is Azure Synapse Analytics? + url: ../overview-what-is.md + - text: Get Started with Azure Synapse Analytics + url: ../get-started.md + - text: What is dedicated SQL pool (formerly SQL DW)? + url: ../sql-data-warehouse/sql-data-warehouse-overview-what-is.md + - text: Benefits of cloud migration + url: https://azure.microsoft.com/overview/cloud-migration-benefits-challenges + - text: Migrate a data warehouse to a dedicated SQL pool + url: migrate-to-synapse-analytics-guide.md + - text: Enable Synapse workspace features for a dedicated SQL pool (formerly SQL DW) + url: ../sql-data-warehouse/workspace-connected-create.md + - title: Migration resources + linkLists: + - linkListType: architecture + links: + - text: Azure Synapse Resources + url: ../index.yml + - text: Azure Synapse SQL architecture + url: ../sql/overview-architecture.md + - linkListType: reference + links: + - text: "Customer story: Co-op" + url: https://customers.microsoft.com/story/845578-co-op-retailers-azure + - linkListType: deploy + links: + - text: "Preferred migration accelerator: Next Pathway" + url: https://www.nextpathway.com/ + - title: From IBM Netezza + linkLists: + - linkListType: overview + links: + - text: 1. Design and performance for IBM Netezza migrations + url: netezza/1-design-performance-migration.md + - text: 2. ETL and load migration considerations + url: netezza/2-etl-load-migration-considerations.md + - text: 3. Security access operations + url: netezza/3-security-access-operations.md + - text: 4. Visualization and reporting + url: netezza/4-visualization-reporting.md + - text: 5. Minimize SQL issues + url: netezza/5-minimize-sql-issues.md + - text: 6. Microsoft and third-party migration tools + url: netezza/6-microsoft-third-party-migration-tools.md + - text: 7. Beyond migration implementation + url: netezza/7-beyond-data-warehouse-migration.md + - title: From Teradata + linkLists: + - linkListType: overview + links: + - text: 1. Design and performance for Teradata migrations + url: teradata/1-design-performance-migration.md + - text: 2. ETL and load migration considerations + url: teradata/2-etl-load-migration-considerations.md + - text: 3. Security access operations + url: teradata/3-security-access-operations.md + - text: 4. Visualization and reporting + url: teradata/4-visualization-reporting.md + - text: 5. Minimize SQL issues + url: teradata/5-minimize-sql-issues.md + - text: 6. Microsoft and third-party migration tools + url: teradata/6-microsoft-third-party-migration-tools.md + - text: 7. 
Beyond migration implementation + url: teradata/7-beyond-data-warehouse-migration.md \ No newline at end of file diff --git a/articles/synapse-analytics/migration-guides/media/1-design-performance-migration/azure-synapse-ecosystem.png b/articles/synapse-analytics/migration-guides/media/1-design-performance-migration/azure-synapse-ecosystem.png new file mode 100644 index 000000000000..1d8006150e42 Binary files /dev/null and b/articles/synapse-analytics/migration-guides/media/1-design-performance-migration/azure-synapse-ecosystem.png differ diff --git a/articles/synapse-analytics/migration-guides/media/1-design-performance-migration/migration-steps.png b/articles/synapse-analytics/migration-guides/media/1-design-performance-migration/migration-steps.png new file mode 100644 index 000000000000..1314a31feec9 Binary files /dev/null and b/articles/synapse-analytics/migration-guides/media/1-design-performance-migration/migration-steps.png differ diff --git a/articles/synapse-analytics/migration-guides/media/2-etl-load-migration-considerations/migration-options-flowchart.png b/articles/synapse-analytics/migration-guides/media/2-etl-load-migration-considerations/migration-options-flowchart.png new file mode 100644 index 000000000000..d2757f735b3f Binary files /dev/null and b/articles/synapse-analytics/migration-guides/media/2-etl-load-migration-considerations/migration-options-flowchart.png differ diff --git a/articles/synapse-analytics/migration-guides/media/3-security-access-operations/automating-migration-privileges.png b/articles/synapse-analytics/migration-guides/media/3-security-access-operations/automating-migration-privileges.png new file mode 100644 index 000000000000..88bf2d7ff1c5 Binary files /dev/null and b/articles/synapse-analytics/migration-guides/media/3-security-access-operations/automating-migration-privileges.png differ diff --git a/articles/synapse-analytics/migration-guides/media/4-visualization-reporting/data-virtualization-semantics.png b/articles/synapse-analytics/migration-guides/media/4-visualization-reporting/data-virtualization-semantics.png new file mode 100644 index 000000000000..c5db25678a17 Binary files /dev/null and b/articles/synapse-analytics/migration-guides/media/4-visualization-reporting/data-virtualization-semantics.png differ diff --git a/articles/synapse-analytics/migration-guides/media/4-visualization-reporting/migration-data-virtualization.png b/articles/synapse-analytics/migration-guides/media/4-visualization-reporting/migration-data-virtualization.png new file mode 100644 index 000000000000..a488cbde9745 Binary files /dev/null and b/articles/synapse-analytics/migration-guides/media/4-visualization-reporting/migration-data-virtualization.png differ diff --git a/articles/synapse-analytics/migration-guides/media/6-microsoft-3rd-party-migration-tools/azure-data-factory-mapping-dataflows-lrg.png b/articles/synapse-analytics/migration-guides/media/6-microsoft-3rd-party-migration-tools/azure-data-factory-mapping-dataflows-lrg.png new file mode 100644 index 000000000000..4b6454590692 Binary files /dev/null and b/articles/synapse-analytics/migration-guides/media/6-microsoft-3rd-party-migration-tools/azure-data-factory-mapping-dataflows-lrg.png differ diff --git a/articles/synapse-analytics/migration-guides/media/6-microsoft-3rd-party-migration-tools/azure-data-factory-mapping-dataflows.png b/articles/synapse-analytics/migration-guides/media/6-microsoft-3rd-party-migration-tools/azure-data-factory-mapping-dataflows.png new file mode 100644 index 
000000000000..a2ee2a1faccc Binary files /dev/null and b/articles/synapse-analytics/migration-guides/media/6-microsoft-3rd-party-migration-tools/azure-data-factory-mapping-dataflows.png differ diff --git a/articles/synapse-analytics/migration-guides/media/6-microsoft-3rd-party-migration-tools/azure-data-factory-wrangling-dataflows.png b/articles/synapse-analytics/migration-guides/media/6-microsoft-3rd-party-migration-tools/azure-data-factory-wrangling-dataflows.png new file mode 100644 index 000000000000..dcc0309b2539 Binary files /dev/null and b/articles/synapse-analytics/migration-guides/media/6-microsoft-3rd-party-migration-tools/azure-data-factory-wrangling-dataflows.png differ diff --git a/articles/synapse-analytics/migration-guides/media/7-beyond-data-warehouse-migration/analytical-workload-platforms.png b/articles/synapse-analytics/migration-guides/media/7-beyond-data-warehouse-migration/analytical-workload-platforms.png new file mode 100644 index 000000000000..ee28b28424b3 Binary files /dev/null and b/articles/synapse-analytics/migration-guides/media/7-beyond-data-warehouse-migration/analytical-workload-platforms.png differ diff --git a/articles/synapse-analytics/migration-guides/media/7-beyond-data-warehouse-migration/azure-data-factory-pipeline.png b/articles/synapse-analytics/migration-guides/media/7-beyond-data-warehouse-migration/azure-data-factory-pipeline.png new file mode 100644 index 000000000000..737de7631030 Binary files /dev/null and b/articles/synapse-analytics/migration-guides/media/7-beyond-data-warehouse-migration/azure-data-factory-pipeline.png differ diff --git a/articles/synapse-analytics/migration-guides/media/7-beyond-data-warehouse-migration/azure-datalake-streaming-data.png b/articles/synapse-analytics/migration-guides/media/7-beyond-data-warehouse-migration/azure-datalake-streaming-data.png new file mode 100644 index 000000000000..da408160f6f4 Binary files /dev/null and b/articles/synapse-analytics/migration-guides/media/7-beyond-data-warehouse-migration/azure-datalake-streaming-data.png differ diff --git a/articles/synapse-analytics/migration-guides/media/7-beyond-data-warehouse-migration/azure-ml-studio-ui.png b/articles/synapse-analytics/migration-guides/media/7-beyond-data-warehouse-migration/azure-ml-studio-ui.png new file mode 100644 index 000000000000..fa57bed387d6 Binary files /dev/null and b/articles/synapse-analytics/migration-guides/media/7-beyond-data-warehouse-migration/azure-ml-studio-ui.png differ diff --git a/articles/synapse-analytics/migration-guides/media/7-beyond-data-warehouse-migration/azure-synapse-analytics-lake-database.png b/articles/synapse-analytics/migration-guides/media/7-beyond-data-warehouse-migration/azure-synapse-analytics-lake-database.png new file mode 100644 index 000000000000..9e4964526ad1 Binary files /dev/null and b/articles/synapse-analytics/migration-guides/media/7-beyond-data-warehouse-migration/azure-synapse-analytics-lake-database.png differ diff --git a/articles/synapse-analytics/migration-guides/media/7-beyond-data-warehouse-migration/azure-synapse-train-predict.png b/articles/synapse-analytics/migration-guides/media/7-beyond-data-warehouse-migration/azure-synapse-train-predict.png new file mode 100644 index 000000000000..1df60a79042e Binary files /dev/null and b/articles/synapse-analytics/migration-guides/media/7-beyond-data-warehouse-migration/azure-synapse-train-predict.png differ diff --git 
a/articles/synapse-analytics/migration-guides/media/7-beyond-data-warehouse-migration/complex-data-warehouse-structure.png b/articles/synapse-analytics/migration-guides/media/7-beyond-data-warehouse-migration/complex-data-warehouse-structure.png new file mode 100644 index 000000000000..ec0bae316012 Binary files /dev/null and b/articles/synapse-analytics/migration-guides/media/7-beyond-data-warehouse-migration/complex-data-warehouse-structure.png differ diff --git a/articles/synapse-analytics/migration-guides/migrate-to-synapse-analytics-guide.md b/articles/synapse-analytics/migration-guides/migrate-to-synapse-analytics-guide.md index 7569031f8b37..152c14a0797d 100644 --- a/articles/synapse-analytics/migration-guides/migrate-to-synapse-analytics-guide.md +++ b/articles/synapse-analytics/migration-guides/migrate-to-synapse-analytics-guide.md @@ -8,8 +8,8 @@ ms.devlang: ms.topic: conceptual author: WilliamDAssafMSFT ms.author: wiassaf -ms.reviewer: sngun -ms.date: 03/10/2021 +ms.reviewer: +ms.date: 05/24/2022 --- # Migrate a data warehouse to a dedicated SQL pool in Azure Synapse Analytics @@ -54,6 +54,11 @@ Performing a successful migration requires you to migrate your table schemas, co The Customer Advisory Team has some great Azure Synapse Analytics (formerly Azure SQL Data Warehouse) guidance published as blog posts. For more information on migration, see [Migrating data to Azure SQL Data Warehouse in practice](/archive/blogs/sqlcat/migrating-data-to-azure-sql-data-warehouse-in-practice). +For more information specifically about migrations from Netezza or Teradata to Azure Synapse Analytics, start at the first step of a seven-article sequence on migrations: + +- [Netezza to Azure Synapse Analytics migrations](netezza/1-design-performance-migration.md) +- [Teradata to Azure Synapse Analytics migrations](teradata/1-design-performance-migration.md) + ## Migration assets from real-world engagements For more assistance with completing this migration scenario, see the following resources. They were developed in support of a real-world migration project engagement. @@ -64,8 +69,12 @@ For more assistance with completing this migration scenario, see the following r | [Handling data encoding issues while loading data to Azure Synapse Analytics](https://azure.microsoft.com/blog/handling-data-encoding-issues-while-loading-data-to-sql-data-warehouse/) | This blog post provides insight on some of the data encoding issues you might encounter while using PolyBase to load data to SQL Data Warehouse. This article also provides some options that you can use to overcome such issues and load the data successfully. | | [Getting table sizes in Azure Synapse Analytics dedicated SQL pool](https://github.com/Microsoft/DataMigrationTeam/blob/master/Whitepapers/Getting%20table%20sizes%20in%20SQL%20DW.pdf) | One of the key tasks that an architect must perform is to get metrics about a new environment post-migration. Examples include collecting load times from on-premises to the cloud and collecting PolyBase load times. One of the most important tasks is to determine the storage size in SQL Data Warehouse compared to the customer's current platform. | + The Data SQL Engineering team developed these resources. This team's core charter is to unblock and accelerate complex modernization for data platform migration projects to Microsoft's Azure data platform. 
## Videos Watch how [Walgreens migrated its retail inventory system](https://www.youtube.com/watch?v=86dhd8N1lH4) with about 100 TB of data from Netezza to Azure Synapse Analytics in record time. + +> [!TIP] +> For more information on Synapse migrations, see [Azure Synapse Analytics migration guides](index.yml). \ No newline at end of file diff --git a/articles/synapse-analytics/migration-guides/netezza/1-design-performance-migration.md b/articles/synapse-analytics/migration-guides/netezza/1-design-performance-migration.md new file mode 100644 index 000000000000..670e2aabb860 --- /dev/null +++ b/articles/synapse-analytics/migration-guides/netezza/1-design-performance-migration.md @@ -0,0 +1,334 @@ +--- +title: "Design and performance for Netezza migrations" +description: Learn how Netezza and Azure Synapse SQL databases differ in their approach to high query performance on exceptionally large data volumes. +ms.service: synapse-analytics +ms.subservice: sql-dw +ms.custom: +ms.devlang: +ms.topic: conceptual +author: ajagadish-24 +ms.author: ajagadish +ms.reviewer: wiassaf +ms.date: 05/24/2022 +--- + +# Design and performance for Netezza migrations + +This article is part one of a seven part series that provides guidance on how to migrate from Netezza to Azure Synapse Analytics. This article provides best practices for design and performance. + +## Overview + +> [!TIP] +> More than just a database—the Azure environment includes a comprehensive set of capabilities and tools. + +Due to end of support from IBM, many existing users of Netezza data warehouse systems want to take advantage of the innovations provided by newer environments such as cloud, IaaS, and PaaS, and to delegate tasks like infrastructure maintenance and platform development to the cloud provider. + +Although Netezza and Azure Synapse are both SQL databases designed to use massively parallel processing (MPP) techniques to achieve high query performance on exceptionally large data volumes, there are some basic differences in approach: + +- Legacy Netezza systems are often installed on-premises and use proprietary hardware, while Azure Synapse is cloud based and uses Azure storage and compute resources. + +- Upgrading a Netezza configuration is a major task involving additional physical hardware and potentially lengthy database reconfiguration, or dump and reload. Since storage and compute resources are separate in the Azure environment, these resources can be scaled upwards or downwards independently, leveraging the elastic scaling capability. + +- Azure Synapse can be paused or resized as required to reduce resource utilization and cost. + +Microsoft Azure is a globally available, highly secure, scalable cloud environment, that includes Azure Synapse and an ecosystem of supporting tools and capabilities. The next diagram summarizes the Azure Synapse ecosystem. + +:::image type="content" source="../media/1-design-performance-migration/azure-synapse-ecosystem.png" border="true" alt-text="Chart showing the Azure Synapse ecosystem of supporting tools and capabilities."::: + +> [!TIP] +> Azure Synapse gives best-of-breed performance and price-performance in independent benchmarks. + +Azure Synapse provides best-of-breed relational database performance by using techniques such as massively parallel processing (MPP) and multiple levels of automated caching for frequently used data. 
See the results of this approach in independent benchmarks such as the one run recently by [GigaOm](https://research.gigaom.com/report/data-warehouse-cloud-benchmark/), which compares Azure Synapse to other popular cloud data warehouse offerings. Customers who have migrated to this environment have seen many benefits, including:
+
+- Improved performance and price/performance.
+
+- Increased agility and shorter time to value.
+
+- Faster server deployment and application development.
+
+- Elastic scalability—only pay for actual usage.
+
+- Improved security/compliance.
+
+- Reduced storage and disaster recovery costs.
+
+- Lower overall TCO and better cost control (OPEX).
+
+To maximize these benefits, migrate new or existing data and applications to the Azure Synapse platform. In many organizations, this will include migrating an existing data warehouse from legacy on-premises platforms such as Netezza. At a high level, the basic process includes these steps:
+
+:::image type="content" source="../media/1-design-performance-migration/migration-steps.png" border="true" alt-text="Diagram showing the steps for preparing to migrate, migration, and post-migration.":::
+
+This paper looks at schema migration with a goal of equivalent or better performance of your migrated Netezza data warehouse and data marts on Azure Synapse. This paper applies specifically to migrations from an existing Netezza environment.
+
+## Design considerations
+
+### Migration scope
+
+> [!TIP]
+> Create an inventory of objects to be migrated and document the migration process.
+
+#### Preparation for migration
+
+When migrating from a Netezza environment, there are some specific topics to consider in addition to the more general subjects described in this article.
+
+#### Choose the workload for the initial migration
+
+Legacy Netezza environments have typically evolved over time to encompass multiple subject areas and mixed workloads. When deciding where to start on an initial migration project, choose an area that can:
+
+- Prove the viability of migrating to Azure Synapse by quickly delivering the benefits of the new environment.
+
+- Allow the in-house technical staff to gain relevant experience of the processes and tools involved, which can be used in migrations to other areas.
+
+- Create a template for further migrations specific to the source Netezza environment and the current tools and processes that are already in place.
+
+A good candidate for an initial migration from the Netezza environment that would enable the preceding items is typically one that implements a BI/Analytics workload (rather than an OLTP workload) with a data model that can be migrated with minimal modifications—normally a star or snowflake schema.
+
+The migration data volume for the initial exercise should be large enough to demonstrate the capabilities and benefits of the Azure Synapse environment while quickly demonstrating the value—typically in the 1-10TB range.
+
+To minimize the risk and reduce implementation time for the initial migration project, confine the scope of the migration to just the data marts. However, this won't address broader topics such as ETL migration and historical data migration as part of the initial migration project. Address these topics in later phases of the project, once the migrated data mart layer is backfilled with the data and processes required to build them.
+ +#### Lift and shift as-is versus a phased approach incorporating changes + +> [!TIP] +> 'Lift and shift' is a good starting point, even if subsequent phases will implement changes to the data model. + +Whatever the drive and scope of the intended migration, there are—broadly speaking—two types of migration: + +##### Lift and shift + +In this case, the existing data model—such as a star schema—is migrated unchanged to the new Azure Synapse platform. The emphasis is on minimizing risk and the migration time required by reducing the work needed to realize the benefits of moving to the Azure cloud environment. + +This is a good fit for existing Netezza environments where a single data mart is being migrated, or where the data is already in a well-designed star or snowflake schema—or there are other pressures to move to a more modern cloud environment. + +##### Phased approach incorporating modifications + +In cases where a legacy warehouse has evolved over a long time, you might need to re-engineer to maintain the required performance levels or to support new data, such as Internet of Things (IoT) streams. Migrate to Azure Synapse to get the benefits of a scalable cloud environment as part of the re-engineering process. Migration could include a change in the underlying data model, such as a move from an Inmon model to a data vault. + +Microsoft recommends moving the existing data model as-is to Azure and using the performance and flexibility of the Azure environment to apply the re-engineering changes, leveraging Azure's capabilities to make the changes without impacting the existing source system. + +#### Use Azure Data Factory to implement a metadata-driven migration + +Automate and orchestrate the migration process by using the capabilities of the Azure environment. This approach minimizes the impact on the existing Netezza environment, which may already be running close to full capacity. + +Azure Data Factory is a cloud-based data integration service that allows creation of data-driven workflows in the cloud for orchestrating and automating data movement and data transformation. Using Data Factory, you can create and schedule data-driven workflows—called pipelines—to ingest data from disparate data stores. Data Factory can process and transform data by using compute services such as Azure HDInsight Hadoop, Spark, Azure Data Lake Analytics, and Azure Machine Learning. + +By creating metadata to list the data tables to be migrated and their location, you can use the Data Factory facilities to manage the migration process. + +### Design differences between Netezza and Azure Synapse + +#### Multiple databases versus a single database and schemas + +> [!TIP] +> Combine multiple databases into a single database in Azure Synapse and use schemas to logically separate the tables. + +In a Netezza environment, there are often multiple separate databases for individual parts of the overall environment. For example, there may be a separate database for data ingestion and staging tables, a database for the core warehouse tables, and another database for data marts, sometimes called a semantic layer. Processing these as ETL/ELT pipelines may implement cross-database joins and will move data between these separate databases. + +> [!TIP] +> Replace Netezza-specific features with Azure Synapse features. + +Querying within the Azure Synapse environment is limited to a single database. Schemas are used to separate the tables into logically separate groups. 
Therefore, we recommend using a series of schemas within the target Azure Synapse database to mimic any separate databases migrated from the Netezza environment. If the Netezza environment already uses schemas, you may need to use a new naming convention to move the existing Netezza tables and views to the new environment—for example, concatenate the existing Netezza schema and table names into the new Azure Synapse table name, and use schema names in the new environment to maintain the original separate database names. Consolidated schema names can include dots—however, be aware that Azure Synapse Spark may have issues with object names that contain dots. You can use SQL views over the underlying tables to maintain the logical structures, but there are some potential downsides to this approach:
+
+- Views in Azure Synapse are read-only, so any updates to the data must take place on the underlying base tables.
+
+- There may already be one or more layers of views in existence, and adding an extra layer of views might impact performance and supportability as nested views are difficult to troubleshoot.
+
+#### Table considerations
+
+> [!TIP]
+> Use existing indexes to indicate candidates for indexing in the migrated warehouse.
+
+When migrating tables between different technologies, only the raw data and the metadata that describes it gets physically moved between the two environments. Other database elements from the source system—such as indexes—aren't migrated as these may not be needed or may be implemented differently within the new target environment.
+
+However, it's important to understand where performance optimizations such as indexes have been used in the source environment, as this can indicate where to add performance optimization in the new target environment. For example, if queries in the source Netezza environment frequently use zone maps, it may indicate that a non-clustered index should be created within the migrated Azure Synapse database. Other native performance optimization techniques, such as table replication, may be more applicable than a straight 'like for like' index creation.
+
+#### Unsupported Netezza database object types
+
+> [!TIP]
+> Assess the impact of unsupported data types as part of the preparation phase.
+
+Netezza implements some database objects that aren't directly supported in Azure Synapse, but there are methods to achieve the same functionality within the new environment:
+
+- Zone Maps—In Netezza, zone maps are automatically created and maintained for some column types and are used at query time to restrict the amount of data to be scanned. Zone Maps are created on the following column types:
+  - `INTEGER` columns of length 8 bytes or less.
+  - Temporal columns. For instance, `DATE`, `TIME`, and `TIMESTAMP`.
+  - `CHAR` columns, if these are part of a materialized view and mentioned in the `ORDER BY` clause.
+
+  You can find out which columns have zone maps by using the `nz_zonemap` utility, which is part of the NZ Toolkit. Azure Synapse doesn't include zone maps, but you can achieve similar results by using other user-defined index types and/or partitioning.
+
+- Clustered Base tables (CBT)—In Netezza, CBTs are commonly used for fact tables, which can have billions of records. Scanning such a huge table requires a lot of processing time, since a full table scan might be needed to get relevant records. Organizing records via restrictive CBTs allows Netezza to group records in the same or nearby extents. This process also creates zone maps that improve the performance by reducing the amount of data to be scanned.
+
+  In Azure Synapse, you can achieve a similar effect by using partitioning and/or other indexes.
+
+- Materialized views—Netezza supports materialized views and recommends creating one or more of these over large tables having many columns where only a few of those columns are regularly used in queries. The system automatically maintains materialized views when data in the base table is updated.
+
+  Azure Synapse supports materialized views, with the same functionality as Netezza.
+
+#### Netezza data type mapping
+
+Most Netezza data types have a direct equivalent in Azure Synapse. This table shows these data types together with the recommended approach for handling them.
+
+| Netezza Data Type | Azure Synapse Data Type |
+|--------------------------------|-------------------------------------|
+| BIGINT | BIGINT |
+| BINARY VARYING(n) | VARBINARY(n) |
+| BOOLEAN | BIT |
+| BYTEINT | TINYINT |
+| CHARACTER VARYING(n) | VARCHAR(n) |
+| CHARACTER(n) | CHAR(n) |
+| DATE | DATE |
+| DECIMAL(p,s) | DECIMAL(p,s) |
+| DOUBLE PRECISION | FLOAT |
+| FLOAT(n) | FLOAT(n) |
+| INTEGER | INT |
+| INTERVAL | INTERVAL data types aren't currently directly supported in Azure Synapse but can be calculated using temporal functions such as DATEDIFF |
+| MONEY | MONEY |
+| NATIONAL CHARACTER VARYING(n) | NVARCHAR(n) |
+| NATIONAL CHARACTER(n) | NCHAR(n) |
+| NUMERIC(p,s) | NUMERIC(p,s) |
+| REAL | REAL |
+| SMALLINT | SMALLINT |
+| ST_GEOMETRY(n) | Spatial data types such as ST_GEOMETRY aren't currently supported in Azure Synapse, but the data could be stored as VARCHAR or VARBINARY |
+| TIME | TIME |
+| TIME WITH TIME ZONE | DATETIMEOFFSET |
+| TIMESTAMP | DATETIME |
+
+> [!TIP]
+> Assess the number and type of non-data objects to be migrated as part of the preparation phase.
+
+There are third-party vendors who offer tools and services to automate migration, including the mapping of data types. If a third-party ETL tool such as Informatica or Talend is already in use in the Netezza environment, those tools can implement any required data transformations.
+
+#### SQL DML syntax differences
+
+There are a few differences in SQL Data Manipulation Language (DML) syntax between Netezza SQL and Azure Synapse (T-SQL) that you should be aware of during migration:
+
+- `STRPOS`: In Netezza, the `STRPOS` function returns the position of a substring within a string. The equivalent function in Azure Synapse is `CHARINDEX`, with the order of the arguments reversed. For example, `SELECT STRPOS('abcdef','def')...` in Netezza is equivalent to `SELECT CHARINDEX('def','abcdef')...` in Azure Synapse.
+
+- `AGE`: Netezza supports the `AGE` operator to give the interval between two temporal values, such as timestamps or dates. For example, `SELECT AGE('23-03-1956','01-01-2019') FROM...`. In Azure Synapse, `DATEDIFF` gives the interval. For example, `SELECT DATEDIFF(day, '1956-03-23','2019-01-01') FROM...`. Note the date representation sequence.
+
+- `NOW()`: Netezza uses `NOW()` to represent `CURRENT_TIMESTAMP` in Azure Synapse.
+
+#### Functions, stored procedures, and sequences
+
+> [!TIP]
+> Assess the number and type of non-data objects to be migrated as part of the preparation phase.
+
+When migrating from a mature legacy data warehouse environment such as Netezza, you must often migrate elements other than simple tables and views to the new target environment. Examples include functions, stored procedures, and sequences.
+ +As part of the preparation phase, create an inventory of these objects to be migrated, and define the method of handling them. Assign an appropriate allocation of resources in the project plan. + +There may be facilities in the Azure environment that replace the functionality implemented as functions or stored procedures in the Netezza environment. In this case, it's more efficient to use the built-in Azure facilities rather than recoding the Netezza functions. + +[Data integration partners](../../partner/data-integration.md) offer tools and services that can automate the migration. + +##### Functions + +As with most database products, Netezza supports system functions and user-defined functions within an SQL implementation. When migrating to another database platform such as Azure Synapse, common system functions are available and can be migrated without change. Some system functions may have slightly different syntax, but the required changes can be automated if so. + +For system functions where there's no equivalent, or for arbitrary user-defined functions, recode these using the language(s) available in the target environment. Netezza user-defined functions are coded in nzlua or C++ languages while Azure Synapse uses the popular Transact-SQL language to implement user-defined functions. + +##### Stored procedures + +Most modern database products allow for procedures to be stored within the database. Netezza provides the NZPLSQL language for this purpose. NZPLSQL is based on Postgres PL/pgSQL. + +A stored procedure typically contains SQL statements and some procedural logic, and may return data or a status. + +Azure Synapse Analytics also supports stored procedures using T-SQL. If you must migrate stored procedures, recode these procedures for their new environment. + +##### Sequences + +In Netezza, a sequence is a named database object created via `CREATE SEQUENCE` that can provide the unique value via the `NEXT VALUE FOR` method. Use these to generate unique numbers for use as surrogate key values for primary key values. + +Within Azure Synapse, there's no `CREATE SEQUENCE`. Sequences are handled via use of [IDENTITY](/sql/t-sql/statements/create-table-transact-sql-identity-property?msclkid=8ab663accfd311ec87a587f5923eaa7b) columns or using SQL code to create the next sequence number in a series. + +### Extract metadata and data from a Netezza environment + +#### Data Definition Language (DDL) generation + +> [!TIP] +> Use Netezza external tables for most efficient data extract. + +You can edit existing Netezza CREATE TABLE and CREATE VIEW scripts to create the equivalent definitions with modified data types, if necessary, as described in the previous section. Typically, this involves removing or modifying any extra Netezza-specific clauses such as `ORGANIZE ON`. + +However, all the information that specifies the current definitions of tables and views within the existing Netezza environment is maintained within system catalog tables. These tables are the best source of this information, as it's guaranteed to be up to date and complete. User-maintained documentation may not be in sync with the current table definitions. + +Access the information in these tables via utilities such as `nz_ddl_table` and generate the `CREATE TABLE DDL` statements for the equivalent tables in Azure Synapse. + +Third-party migration and ETL tools also use the catalog information to achieve the same result. 
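+
+For illustration, the following sketch shows how a hypothetical Netezza table definition might be edited for Azure Synapse. The table, its columns, and the distribution and index choices are invented examples rather than a prescription:
+
+```sql
+-- Hypothetical Netezza definition, as generated by a utility such as nz_ddl_table
+CREATE TABLE sales_fact
+(
+    sale_id     BIGINT,
+    customer_id INTEGER,
+    sale_date   DATE,
+    amount      NUMERIC(10,2)
+)
+DISTRIBUTE ON (customer_id)
+ORGANIZE ON (sale_date);
+
+-- Edited equivalent for an Azure Synapse dedicated SQL pool: the Netezza-specific
+-- clauses are removed and replaced by a WITH clause that specifies distribution
+-- and index options.
+CREATE TABLE dbo.sales_fact
+(
+    sale_id     BIGINT,
+    customer_id INT,
+    sale_date   DATE,
+    amount      NUMERIC(10,2)
+)
+WITH
+(
+    DISTRIBUTION = HASH(customer_id),
+    CLUSTERED COLUMNSTORE INDEX
+);
+```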
+
+#### Data extraction from Netezza
+
+Migrate the raw data from existing Netezza tables into flat delimited files using standard Netezza utilities, such as nzsql and nzunload, or via external tables. Compress these files using gzip and upload them to Azure Blob Storage via AzCopy or by using Azure data transport facilities such as Azure Data Box.
+
+During a migration exercise, extract the data as efficiently as possible. Use the external tables approach as this is the fastest method. Perform multiple extracts in parallel to maximize the throughput for data extraction.
+
+This is a simple example of an external table extract:
+
+```sql
+CREATE EXTERNAL TABLE '/tmp/export_tab1.csv' USING (DELIM ',') AS SELECT * FROM <table_name>;
+```
+
+If sufficient network bandwidth is available, extract data directly from an on-premises Netezza system into Azure Synapse tables or Azure Blob Storage by using Azure Data Factory processes or third-party data migration or ETL products.
+
+Recommended data formats for the extracted data include delimited text files (also called Comma Separated Values or CSV), Optimized Row Columnar (ORC), or Parquet files.
+
+For more information about the process of migrating data and ETL from a Netezza environment, see [Data migration, ETL, and load for Netezza migration](2-etl-load-migration-considerations.md).
+
+## Performance recommendations for Netezza migrations
+
+This article provides general information and guidelines about the use of performance optimization techniques for Azure Synapse and adds specific recommendations for use when migrating from a Netezza environment.
+
+### Similarities in performance tuning approach concepts
+
+> [!TIP]
+> Many Netezza tuning concepts hold true for Azure Synapse.
+
+When moving from a Netezza environment, many of the performance tuning concepts for Azure Synapse will be remarkably familiar. For example:
+
+- Using data distribution to co-locate data to be joined onto the same processing node will minimize data movement
+
+- Using the smallest data type for a given column will save storage space and accelerate query processing
+
+- Ensuring data types of columns to be joined are identical will optimize join processing by reducing the need to transform data for matching
+
+- Ensuring statistics are up to date will help the optimizer produce the best execution plan
+
+### Differences in performance tuning approach
+
+> [!TIP]
+> Prioritize early familiarity with Azure Synapse tuning options in a migration exercise.
+
+This section highlights lower-level implementation differences between Netezza and Azure Synapse for performance tuning.
+
+#### Data distribution options
+
+`CREATE TABLE` statements in both Netezza and Azure Synapse allow for specification of a distribution definition—via `DISTRIBUTE ON` in Netezza, and `DISTRIBUTION =` in Azure Synapse.
+
+Compared to Netezza, Azure Synapse provides an additional way to achieve local joins for small table-large table joins (typically a dimension table to a fact table in a star schema model): replicate the smaller dimension table across all nodes. This ensures that any value of the join key of the larger table will have a matching dimension row locally available. The overhead of replicating the dimension tables is relatively low, provided the tables aren't very large (see [Design guidance for replicated tables](../../sql-data-warehouse/design-guidance-for-replicated-tables.md))—in which case, the hash distribution approach as described previously is more appropriate.
For more information, see [Distributed tables design](../../sql-data-warehouse/sql-data-warehouse-tables-distribute.md). + +#### Data indexing + +Azure Synapse provides several user-definable indexing options, but these are different from the system managed zone maps in Netezza. To understand the different indexing options, see [table indexes](/azure/sql-data-warehouse/sql-data-warehouse-tables-index). + +The existing system managed zone maps within the source Netezza environment can indicate how the data is currently used. They can identify candidate columns for indexing within the Azure Synapse environment. + +#### Data partitioning + +In an enterprise data warehouse, fact tables can contain many billions of rows. Partitioning optimizes the maintenance and querying of these tables by splitting them into separate parts to reduce the amount of data processed. The `CREATE TABLE` statement defines the partitioning specification for a table. + +Only one field per table can be used for partitioning. This is frequently a date field since many queries are filtered by date or a date range. It's possible to change the partitioning of a table after initial load by recreating the table with the new distribution using the `CREATE TABLE AS` (or CTAS) statement. See [table partitions](/azure/sql-data-warehouse/sql-data-warehouse-tables-partition) for a detailed discussion of partitioning in Azure Synapse. + +#### Data table statistics + +Ensure that statistics on data tables are up to date by building in a [statistics](../../sql/develop-tables-statistics.md) step to ETL/ELT jobs. + +#### PolyBase for data loading + +PolyBase is the most efficient method for loading large amounts of data into the warehouse since it can leverage parallel loading streams. For more information, see [PolyBase data loading strategy](../../sql/load-data-overview.md). + +#### Use workload management + +Use [workload management](../../sql-data-warehouse/sql-data-warehouse-workload-management.md?context=%2fazure%2fsynapse-analytics%2fcontext%2fcontext) instead of resource classes. ETL would be in its own workgroup and should be configured to have more resources per query (less concurrency by more resources). For more information, see [What is dedicated SQL pool in Azure Synapse Analytics](../../sql-data-warehouse/sql-data-warehouse-overview-what-is.md). + +## Next steps + +To learn more about ETL and load for Netezza migration, see the next article in this series: [Data migration, ETL, and load for Netezza migration](2-etl-load-migration-considerations.md). \ No newline at end of file diff --git a/articles/synapse-analytics/migration-guides/netezza/2-etl-load-migration-considerations.md b/articles/synapse-analytics/migration-guides/netezza/2-etl-load-migration-considerations.md new file mode 100644 index 000000000000..239ad5d5e382 --- /dev/null +++ b/articles/synapse-analytics/migration-guides/netezza/2-etl-load-migration-considerations.md @@ -0,0 +1,313 @@ +--- +title: "Data migration, ETL, and load for Netezza migration" +description: Learn how to plan your data migration from Netezza to Azure Synapse to minimize the risk and impact on users. +ms.service: synapse-analytics +ms.subservice: sql-dw +ms.custom: +ms.devlang: +ms.topic: conceptual +author: ajagadish-24 +ms.author: ajagadish +ms.reviewer: wiassaf +ms.date: 05/24/2022 +--- + +# Data migration, ETL, and load for Netezza migration + +This article is part two of a seven part series that provides guidance on how to migrate from Netezza to Azure Synapse Analytics. 
This article provides best practices for ETL and load migration. + +## Data migration considerations + +### Initial decisions for data migration from Netezza + +When migrating a Netezza data warehouse, you need to ask some basic data-related questions. For example: + +- Should unused table structures be migrated or not? + +- What's the best migration approach to minimize risk and impact for users? + +- Migrating data marts—stay physical or go virtual? + +The next sections discuss these points within the context of migration from Netezza. + +#### Migrate unused tables? + +> [!TIP] +> In legacy systems, it's not unusual for tables to become redundant over time—these don't need to be migrated in most cases. + +It makes sense to only migrate tables that are in use in the existing system. Tables that aren't active can be archived rather than migrated, so that the data is available if necessary in future. It's best to use system metadata and log files rather than documentation to determine which tables are in use, because documentation can be out of date. + +If enabled, Netezza query history tables contain information that can determine when a given table was last accessed—which can in turn be used to decide whether a table is a candidate for migration. + +Here's an example query that looks for the usage of a specific table within a given time window: + +```sql +SELECT FORMAT_TABLE_ACCESS (usage), + hq.submittime +FROM "$v_hist_queries" hq + INNER JOIN "$hist_table_access_3" hta USING +(NPSID, NPSINSTANCEID, OPID, SESSIONID) +WHERE hq.dbname = 'PROD' +AND hta.schemaname = 'ADMIN' +AND hta.tablename = 'TEST_1' +AND hq.SUBMITTIME > '01-01-2015' +AND hq.SUBMITTIME <= '08-06-2015' +AND +( + instr(FORMAT_TABLE_ACCESS(usage),'ins') > 0 + OR instr(FORMAT_TABLE_ACCESS(usage),'upd') > 0 + OR instr(FORMAT_TABLE_ACCESS(usage),'del') > 0 +) +AND status=0; +``` + +```output +| FORMAT_TABLE_ACCESS | SUBMITTIME +----------------------+--------------------------- +ins | 2015-06-16 18:32:25.728042 +ins | 2015-06-16 17:46:14.337105 +ins | 2015-06-16 17:47:14.430995 +(3 rows) +``` + +This query uses the helper function `FORMAT_TABLE_ACCESS` and the digit at the end of the `$v_hist_table_access_3` view to match the installed query history version. + +#### What is the best migration approach to minimize risk and impact on users? + +> [!TIP] +> Migrate the existing model as-is initially, even if a change to the data model is planned in the future. + +This question comes up often since companies often want to lower the impact of changes on the data warehouse data model to improve agility. Companies see an opportunity to do so during a migration to modernize their data model. This approach carries a higher risk because it could impact ETL jobs populating the data warehouse from a data warehouse to feed dependent data marts. Because of that risk, it's usually better to redesign on this scale after the data warehouse migration. + +Even if a data model change is an intended part of the overall migration, it's good practice to migrate the existing model as-is to the new environment (Azure Synapse in this case), rather than do any re-engineering on the new platform during migration. This approach has the advantage of minimizing the impact on existing production systems, while also leveraging the performance and elastic scalability of the Azure platform for one-off re-engineering tasks. + +When migrating from Netezza, often the existing data model is already suitable for as-is migration to Azure Synapse. 
+ +#### Migrate data marts - stay physical or go virtual? + +> [!TIP] +> Virtualizing data marts can save on storage and processing resources. + +In legacy Netezza data warehouse environments, it's common practice to create several data marts that are structured to provide good performance for ad hoc self-service queries and reports for a given department or business function within an organization. As such, a data mart typically consists of a subset of the data warehouse and contains aggregated versions of the data in a form that enables users to easily query that data with fast response times via user-friendly query tools such as Microsoft Power BI, Tableau, or MicroStrategy. This form is typically a dimensional data model. One use of data marts is to expose the data in a usable form, even if the underlying warehouse data model is something different, such as a data vault. + +You can use separate data marts for individual business units within an organization to implement robust data security regimes, by only allowing users to access specific data marts that are relevant to them, and eliminating, obfuscating, or anonymizing sensitive data. + +If these data marts are implemented as physical tables, they'll require additional storage resources to store them, and additional processing to build and refresh them regularly. Also, the data in the mart will only be as up to date as the last refresh operation, and so may be unsuitable for highly volatile data dashboards. + +> [!TIP] +> The performance and scalability of Azure Synapse enables virtualization without sacrificing performance. + +With the advent of relatively low-cost scalable MPP architectures, such as Azure Synapse, and the inherent performance characteristics of such architectures, it may be that you can provide data mart functionality without having to instantiate the mart as a set of physical tables. This is achieved by effectively virtualizing the data marts via SQL views onto the main data warehouse, or via a virtualization layer using features such as views in Azure or the [visualization products of Microsoft partners](../../partner/data-integration.md). This approach simplifies or eliminates the need for additional storage and aggregation processing and reduces the overall number of database objects to be migrated. + +There's another potential benefit to this approach: by implementing the aggregation and join logic within a virtualization layer, and presenting external reporting tools via a virtualized view, the processing required to create these views is 'pushed down' into the data warehouse, which is generally the best place to run joins, aggregations, and other related operations, on large data volumes. + +The primary drivers for choosing a virtual data mart implementation over a physical data mart are: + +- More agility—a virtual data mart is easier to change than physical tables and the associated ETL processes. + +- Lower total cost of ownership—a virtualized implementation requires fewer data stores and copies of data. + +- Elimination of ETL jobs to migrate and simplify data warehouse architecture in a virtualized environment. + +- Performance—although physical data marts have historically been more performant, virtualization products now implement intelligent caching techniques to mitigate. + +### Data migration from Netezza + +#### Understand your data + +Part of migration planning is understanding in detail the volume of data that needs to be migrated since that can impact decisions about the migration approach. 
Use system metadata to determine the physical space taken up by the 'raw data' within the tables to be migrated. In this context, 'raw data' means the amount of space used by the data rows within a table, excluding overheads such as indexes and compression. Understanding the raw data volume is especially important for the largest fact tables since these will typically comprise more than 95% of the data.
+
+Get an accurate number for the volume of data to be migrated for a given table by extracting a representative sample of the data—for example, one million rows—to an uncompressed delimited flat ASCII data file. Then, use the size of that file to get an average raw data size per row of that table. Finally, multiply that average size by the total number of rows in the full table to give a raw data size for the table. Use that raw data size in your planning.
+
+#### Netezza data type mapping
+
+> [!TIP]
+> Assess the impact of unsupported data types as part of the preparation phase.
+
+Most Netezza data types have a direct equivalent in Azure Synapse. The following table shows these data types, together with the recommended approach for mapping them.
+
+| Netezza data type | Azure Synapse data type |
+|-----------------------------------|----------------------------------|
+| BIGINT | BIGINT |
+| BINARY VARYING(n) | VARBINARY(n) |
+| BOOLEAN | BIT |
+| BYTEINT | TINYINT |
+| CHARACTER VARYING(n) | VARCHAR(n) |
+| CHARACTER(n) | CHAR(n) |
+| DATE | DATE |
+| DECIMAL(p,s) | DECIMAL(p,s) |
+| DOUBLE PRECISION | FLOAT |
+| FLOAT(n) | FLOAT(n) |
+| INTEGER | INT |
+| INTERVAL | INTERVAL data types aren't currently directly supported in Azure Synapse but can be calculated using temporal functions, such as DATEDIFF |
+| MONEY | MONEY |
+| NATIONAL CHARACTER VARYING(n) | NVARCHAR(n) |
+| NATIONAL CHARACTER(n) | NCHAR(n) |
+| NUMERIC(p,s) | NUMERIC(p,s) |
+| REAL | REAL |
+| SMALLINT | SMALLINT |
+| ST_GEOMETRY(n) | Spatial data types such as ST_GEOMETRY aren't currently supported in Azure Synapse Analytics, but the data could be stored as VARCHAR or VARBINARY |
+| TIME | TIME |
+| TIME WITH TIME ZONE | DATETIMEOFFSET |
+| TIMESTAMP | DATETIME |
+
+Use the metadata from the Netezza catalog tables to determine whether any of these data types need to be migrated, and allow for this in your migration plan. The important metadata views in Netezza for this type of query are:
+
+- `_V_USER`: the user view gives information about the users in the Netezza system.
+
+- `_V_TABLE`: the table view contains the list of tables created in the Netezza performance system.
+
+- `_V_RELATION_COLUMN`: the relation column system catalog view contains the columns available in a table.
+
+- `_V_OBJECTS`: the objects view lists the different objects like tables, views, functions, and so on, that are available in Netezza.
+
+For example, this Netezza SQL query shows columns and column types:
+
+```sql
+SELECT
+tablename,
+    attname AS COL_NAME,
+    b.FORMAT_TYPE AS COL_TYPE,
+    attnum AS COL_NUM
+FROM _v_table a
+    JOIN _v_relation_column b
+    ON a.objid = b.objid
+WHERE a.tablename = 'ATT_TEST'
+AND a.schema = 'ADMIN'
+ORDER BY attnum;
+```
+
+```output
+TABLENAME | COL_NAME    | COL_TYPE             | COL_NUM
+----------+-------------+----------------------+--------
+ATT_TEST  | COL_INT     | INTEGER              | 1
+ATT_TEST  | COL_NUMERIC | NUMERIC(10,2)        | 2
+ATT_TEST  | COL_VARCHAR | CHARACTER VARYING(5) | 3
+ATT_TEST  | COL_DATE    | DATE                 | 4
+(4 rows)
+```
+
+The query can be modified to search all tables for any occurrences of unsupported data types.
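+
+For example, the following sketch—based on the same catalog views, with the type names to search for adjusted to suit your environment—lists every column whose data type needs special handling, such as `INTERVAL` or `ST_GEOMETRY`:
+
+```sql
+SELECT a.schema,
+    a.tablename,
+    b.attname AS COL_NAME,
+    b.FORMAT_TYPE AS COL_TYPE
+FROM _v_table a
+    JOIN _v_relation_column b
+    ON a.objid = b.objid
+WHERE UPPER(b.FORMAT_TYPE) LIKE 'INTERVAL%'
+   OR UPPER(b.FORMAT_TYPE) LIKE 'ST_GEOMETRY%'
+ORDER BY a.schema, a.tablename, b.attnum;
+```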
+ +Azure Data Factory can be used to move data from a legacy Netezza environment. For more information, see [IBM Netezza connector](../../../data-factory/connector-netezza.md). + +[Third-party vendors](/azure/sql-data-warehouse/sql-data-warehouse-partner-data-integration) offer tools and services to automate migration, including the mapping of data types as previously described. Also, third-party ETL tools, like Informatica or Talend, already in use in the Netezza environment can implement all required data transformations. The next section explores the migration of existing third-party ETL processes. + +## ETL migration considerations + +### Initial decisions regarding Netezza ETL migration + +> [!TIP] +> Plan the approach to ETL migration ahead of time and leverage Azure facilities where appropriate. + +For ETL/ELT processing, legacy Netezza data warehouses may use custom-built scripts using Netezza utilities such as nzsql and nzload, or third-party ETL tools such as Informatica or Ab Initio. Sometimes, Netezza data warehouses use a combination of ETL and ELT approaches that's evolved over time. When planning a migration to Azure Synapse, you need to determine the best way to implement the required ETL/ELT processing in the new environment, while minimizing the cost and risk involved. To learn more about ETL and ELT processing, see [ELT vs ETL Design approach](../../sql-data-warehouse/design-elt-data-loading.md). + +The following sections discuss migration options and make recommendations for various use cases. This flowchart summarizes one approach: + +:::image type="content" source="../media/2-etl-load-migration-considerations/migration-options-flowchart.png" border="true" alt-text="Flowchart of migration options and recommendations."::: + +The first step is always to build an inventory of ETL/ELT processes that need to be migrated. As with other steps, it's possible that the standard 'built-in' Azure features make it unnecessary to migrate some existing processes. For planning purposes, it's important to understand the scale of the migration to be performed. + +In the preceding flowchart, decision 1 relates to a high-level decision about whether to migrate to a totally Azure-native environment. If you're moving to a totally Azure-native environment, we recommend that you re-engineer the ETL processing using [Pipelines and activities in Azure Data Factory](../../../data-factory/concepts-pipelines-activities.md?msclkid=b6ea2be4cfda11ec929ac33e6e00db98&tabs=data-factory) or [Synapse Pipelines](../../get-started-pipelines.md?msclkid=b6e99db9cfda11ecbaba18ca59d5c95c). If you're not moving to a totally Azure-native environment, then decision 2 is whether an existing third-party ETL tool is already in use. + +> [!TIP] +> Leverage investment in existing third-party tools to reduce cost and risk. + +If a third-party ETL tool is already in use, and especially if there's a large investment in skills or several existing workflows and schedules use that tool, then decision 3 is whether the tool can efficiently support Azure Synapse as a target environment. Ideally, the tool will include 'native' connectors that can leverage Azure facilities like PolyBase or [COPY INTO](/sql/t-sql/statements/copy-into-transact-sql), for the most efficient parallel data loading. There's a way to call an external process, such as PolyBase or `COPY INTO`, and pass in the appropriate parameters. In this case, leverage existing skills and workflows, with Azure Synapse as the new target environment. 
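+
+For reference, this is a minimal sketch of the kind of `COPY INTO` statement such a connector (or a hand-built load script) might issue—the target table, storage account, container path, and credential are hypothetical placeholders:
+
+```sql
+COPY INTO dbo.sales_fact
+FROM 'https://<storage-account>.blob.core.windows.net/<container>/netezza-export/*.csv.gz'
+WITH (
+    FILE_TYPE = 'CSV',
+    FIELDTERMINATOR = ',',
+    COMPRESSION = 'GZIP',
+    CREDENTIAL = (IDENTITY = 'Managed Identity')
+);
+```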
+
+If you decide to retain an existing third-party ETL tool, there may be benefits to running that tool within the Azure environment (rather than on an existing on-premises ETL server) and having Azure Data Factory handle the overall orchestration of the existing workflows. One particular benefit is that less data needs to be downloaded from Azure, processed, and then uploaded back into Azure. So, decision 4 is whether to leave the existing tool running as-is or to move it into the Azure environment to achieve cost, performance, and scalability benefits.
+
+### Re-engineer existing Netezza-specific scripts
+
+If some or all of the existing Netezza warehouse ETL/ELT processing is handled by custom scripts that utilize Netezza-specific utilities, such as nzsql or nzload, then these scripts need to be recoded for the new Azure Synapse environment. Similarly, if ETL processes were implemented using stored procedures in Netezza, then these will also have to be recoded.
+
+> [!TIP]
+> The inventory of ETL tasks to be migrated should include scripts and stored procedures.
+
+Some elements of the ETL process are easy to migrate, for example, a simple bulk data load into a staging table from an external file. It may even be possible to automate those parts of the process, for example, by using PolyBase instead of nzload. Other parts of the process that contain arbitrarily complex SQL and/or stored procedures will take more time to re-engineer.
+
+One way of testing Netezza SQL for compatibility with Azure Synapse is to capture some representative SQL statements from Netezza query history, prefix those queries with `EXPLAIN`, and then (assuming a like-for-like migrated data model in Azure Synapse) run those `EXPLAIN` statements in Azure Synapse. Any incompatible SQL will generate an error, and the error information can help determine the scale of the recoding task.
+
+[Microsoft partners](/azure/sql-data-warehouse/sql-data-warehouse-partner-data-integration) offer tools and services to migrate Netezza SQL and stored procedures to Azure Synapse.
+
+### Use third-party ETL tools
+
+As described in the previous section, in many cases the existing legacy data warehouse system will already be populated and maintained by third-party ETL products. For a list of Microsoft data integration partners for Azure Synapse, see [Data integration partners](/azure/sql-data-warehouse/sql-data-warehouse-partner-data-integration).
+
+## Data loading from Netezza
+
+### Choices available when loading data from Netezza
+
+> [!TIP]
+> Third-party tools can simplify and automate the migration process and therefore reduce risk.
+
+When it comes to migrating data from a Netezza data warehouse, there are some basic questions associated with data loading that need to be resolved. You'll need to decide how the data will be physically moved from the existing on-premises Netezza environment into Azure Synapse in the cloud, and which tools will be used to perform the transfer and load. Consider the following questions, which are discussed in the next sections:
+
+- Will you extract the data to files, or move it directly via a network connection?
+
+- Will you orchestrate the process from the source system, or from the Azure target environment?
+
+- Which tools will you use to automate and manage the process?
+
+#### Transfer data via files or network connection?
+
+> [!TIP]
+> Understand the data volumes to be migrated and the available network bandwidth since these factors influence the migration approach decision.
+
+Once the database tables to be migrated have been created in Azure Synapse, you can move the data out of the legacy Netezza system and load it into the new environment to populate those tables. There are two basic approaches:
+
+- **File Extract**—Extract the data from the Netezza tables to flat files, normally in CSV format, via nzsql with the -o option or via the `CREATE EXTERNAL TABLE` statement. Use an external table whenever possible since it's the most efficient in terms of data throughput. The following SQL example creates a CSV file via an external table:
+
+  ```sql
+  CREATE EXTERNAL TABLE '/data/export.csv' USING (delimiter ',')
+  AS SELECT col1, col2, expr1, expr2, col3, col1 || col2 FROM <your-table>;
+  ```
+
+  Use an external table if you're exporting data to a mounted file system on a local Netezza host. If you're exporting data to a remote machine that has JDBC, ODBC, or OLEDB installed, then use the 'remotesource odbc' option in the `USING` clause.
+
+  This approach requires space to land the extracted data files. The space could be local to the Netezza source database (if sufficient storage is available), or remote in Azure Blob Storage. The best performance is achieved when a file is written locally, since that avoids network overhead.
+
+  To minimize the storage and network transfer requirements, it's good practice to compress the extracted data files using a utility like gzip.
+
+  Once extracted, the flat files can either be moved into Azure Blob Storage (co-located with the target Azure Synapse instance), or loaded directly into Azure Synapse using PolyBase or [COPY INTO](/sql/t-sql/statements/copy-into-transact-sql). The method for physically moving data from local on-premises storage to the Azure cloud environment depends on the amount of data and the available network bandwidth.
+
+  Microsoft provides various options to move large volumes of data, including AzCopy for moving files across the network into Azure Storage, Azure ExpressRoute for moving bulk data over a private network connection, and Azure Data Box for moving files to a physical storage device that's then shipped to an Azure data center for loading. For more information, see [data transfer](/azure/architecture/data-guide/scenarios/data-transfer).
+
+- **Direct extract and load across network**—The target Azure environment sends a data extract request, normally via a SQL command, to the legacy Netezza system to extract the data. The results are sent across the network and loaded directly into Azure Synapse, with no need to land the data into intermediate files. The limiting factor in this scenario is normally the bandwidth of the network connection between the Netezza database and the Azure environment. For very large data volumes, this approach may not be practical.
+
+There's also a hybrid approach that uses both methods. For example, you can use the direct network extract approach for smaller dimension tables and samples of the larger fact tables to quickly provide a test environment in Azure Synapse. For large volume historical fact tables, you can use the file extract and transfer approach using Azure Data Box.
+
+#### Orchestrate from Netezza or Azure?
+ +The recommended approach when moving to Azure Synapse is to orchestrate the data extract and loading from the Azure environment using [Azure Synapse Pipelines](../../get-started-pipelines.md?msclkid=b6e99db9cfda11ecbaba18ca59d5c95c) or [Azure Data Factory](../../../data-factory/introduction.md?msclkid=2ccc66eccfde11ecaa58877e9d228779), as well as associated utilities, such as PolyBase or [COPY INTO](/sql/t-sql/statements/copy-into-transact-sql), for the most efficient data loading. This approach leverages Azure capabilities and provides an easy method to build reusable data loading pipelines. + +Other benefits of this approach include reduced impact on the Netezza system during the data load process since the management and loading process is running in Azure, and the ability to automate the process by using metadata-driven data load pipelines. + +#### Which tools can be used? + +The task of data transformation and movement is the basic function of all ETL products. If one of these products is already in use in the existing Netezza environment, then using the existing ETL tool may simplify data migration from Netezza to Azure Synapse. This approach assumes that the ETL tool supports Azure Synapse as a target environment. For more information on tools that support Azure Synapse, see [Data integration partners](/azure/sql-data-warehouse/sql-data-warehouse-partner-data-integration). + +If you're using an ETL tool, consider running that tool within the Azure environment to benefit from Azure cloud performance, scalability, and cost, and free up resources in the Netezza data center. Another benefit is reduced data movement between the cloud and on-premises environments. + +## Summary + +To summarize, our recommendations for migrating data and associated ETL processes from Netezza to Azure Synapse are: + +- Plan ahead to ensure a successful migration exercise. + +- Build a detailed inventory of data and processes to be migrated as soon as possible. + +- Use system metadata and log files to get an accurate understanding of data and process usage. Don't rely on documentation since it may be out of date. + +- Understand the data volumes to be migrated, and the network bandwidth between the on-premises data center and Azure cloud environments. + +- Leverage standard 'built-in' Azure features when appropriate, to minimize the migration workload. + +- Identify and understand the most efficient tools for data extract and load in both Netezza and Azure environments. Use the appropriate tools in each phase in the process. + +- Use Azure facilities, such as [Azure Synapse Pipelines](../../get-started-pipelines.md?msclkid=b6e99db9cfda11ecbaba18ca59d5c95c) or [Azure Data Factory](../../../data-factory/introduction.md?msclkid=2ccc66eccfde11ecaa58877e9d228779), to orchestrate and automate the migration process while minimizing impact on the Netezza system. + +## Next steps + +To learn more about security access operations, see the next article in this series: [Security, access, and operations for Netezza migrations](3-security-access-operations.md). 
\ No newline at end of file diff --git a/articles/synapse-analytics/migration-guides/netezza/3-security-access-operations.md b/articles/synapse-analytics/migration-guides/netezza/3-security-access-operations.md new file mode 100644 index 000000000000..2792d2146146 --- /dev/null +++ b/articles/synapse-analytics/migration-guides/netezza/3-security-access-operations.md @@ -0,0 +1,316 @@ +--- +title: "Security, access, and operations for Netezza migrations" +description: Learn about authentication, users, roles, permissions, monitoring, and auditing, and workload management in Azure Synapse and Netezza. +ms.service: synapse-analytics +ms.subservice: sql-dw +ms.custom: +ms.devlang: +ms.topic: conceptual +author: ajagadish-24 +ms.author: ajagadish +ms.reviewer: wiassaf +ms.date: 05/24/2022 +--- + +# Security, access, and operations for Netezza migrations + +This article is part three of a seven part series that provides guidance on how to migrate from Netezza to Azure Synapse Analytics. This article provides best practices for security access operations. + +## Security considerations + +This article discusses the methods of connection for existing legacy Netezza environments and how they can be migrated to Azure Synapse with minimal risk and user impact. + +It's assumed that there's a requirement to migrate the existing methods of connection and user/role/permission structure as-is. If this isn't the case, then use Azure utilities such as Azure portal to create and manage a new security regime. + +For more information on the [Azure Synapse security](../../sql-data-warehouse/sql-data-warehouse-overview-manage-security.md#authorization) options see [Security whitepaper](../../guidance/security-white-paper-introduction.md). + +### Connection and authentication + +> [!TIP] +> Authentication in both Netezza and Azure Synapse can be "in database" or through external methods. + +#### Netezza authorization options + +The IBM® Netezza® system offers several authentication methods for Netezza database users: + +- **Local authentication**: Netezza administrators define database users and their passwords by using the `CREATE USER` command or through Netezza administrative interfaces. In local authentication, use the Netezza system to manage database accounts and passwords, and to add and remove database users from the system. This method is the default authentication method. + +- **LDAP authentication**: Use an LDAP name server to authenticate database users, manage passwords, database account activations, and deactivations. The Netezza system uses a Pluggable Authentication Module (PAM) to authenticate users on the LDAP name server. Microsoft Active Directory conforms to the LDAP protocol, so it can be treated like an LDAP server for the purposes of LDAP authentication. + +- **Kerberos authentication**: Use a Kerberos distribution server to authenticate database users, manage passwords, database account activations, and deactivations. + +Authentication is a system-wide setting. Users must be either locally authenticated or authenticated by using the LDAP or Kerberos method. If you choose LDAP or Kerberos authentication, create users with local authentication on a per-user basis. LDAP and Kerberos can't be used at the same time to authenticate users. Netezza host supports LDAP or Kerberos authentication for database user logins only, not for operating system logins on the host. 
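+
+As a minimal sketch of local authentication (the user name, password, and group below are illustrative placeholders, not values from this guide), a Netezza administrator might run:
+
+```sql
+-- Create a locally authenticated Netezza database user (placeholder name and password).
+CREATE USER report_user WITH PASSWORD 'Str0ng!Passw0rd';
+
+-- Add the new user to an existing group so that group-level permissions apply.
+ALTER GROUP reporting_group ADD USER report_user;
+```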
+ +#### Azure Synapse authorization options + +Azure Synapse supports two basic options for connection and authorization: + +- **SQL authentication**: SQL authentication is via a database connection that includes a database identifier, user ID, and password plus other optional parameters. This is functionally equivalent to Netezza local connections. + +- **Azure Active Directory (Azure AD) authentication**: With Azure Active Directory authentication, you can centrally manage the identities of database users and other Microsoft services in one central location. Central ID management provides a single place to manage SQL Data Warehouse users and simplifies permission management. Azure AD can also support connections to LDAP and Kerberos services—for example, Azure AD can be used to connect to existing LDAP directories if these are to remain in place after migration of the database. + +### Users, roles, and permissions + +#### Overview + +> [!TIP] +> High-level planning is essential for a successful migration project. + +Both Netezza and Azure Synapse implement database access control via a combination of users, roles (groups in Netezza), and permissions. Both use standard `SQL CREATE USER` and `CREATE ROLE/GROUP` statements to define users and roles, and `GRANT` and `REVOKE` statements to assign or remove permissions to those users and/or roles. + +> [!TIP] +> Automation of migration processes is recommended to reduce elapsed time and scope for errors. + +Conceptually the two databases are similar, and it might be possible to automate the migration of existing user IDs, groups, and permissions to some degree. Migrate such data by extracting the existing legacy user and group information from the Netezza system catalog tables and generating matching equivalent `CREATE USER` and `CREATE ROLE` statements to be run in Azure Synapse to recreate the same user/role hierarchy. + +After data extraction, use Netezza system catalog tables to generate equivalent `GRANT` statements to assign permissions (where an equivalent one exists). The following diagram shows how to use existing metadata to generate the necessary SQL. + +:::image type="content" source="../media/3-security-access-operations/automating-migration-privileges.png" border="true" alt-text="Chart showing how to automate the migration of privileges from an existing system."::: + +See the following sections for more details. + +#### Users and roles + +> [!TIP] +> Migration of a data warehouse requires more than just tables, views, and SQL statements. + +The information about current users and groups in a Netezza system is held in system catalog views `_v_users` and `_v_groupusers`. Use the nzsql utility or tools such as the Netezza® Performance, NzAdmin, or the Netezza Utility scripts to list user privileges. For example, use the `dpu` and `dpgu` commands in nzsql to display users or groups with their permissions. + +Use or edit the utility scripts `nz_get_users` and `nz_get_user_groups` to retrieve the same information in the required format. + +Query system catalog views directly (if the user has `SELECT` access to those views) to obtain current lists of users and roles defined within the system. 
See examples to list users, groups, or users and their associated groups: + +```sql +-- List of users +SELECT USERNAME FROM _V_USER; + +--List of groups +SELECT DISTINCT(GROUPNAME) FROM _V_USERGROUPS; + +--List of users and their associated groups +SELECT USERNAME, GROUPNAME FROM _V_GROUPUSERS; +``` + +Modify the example `SELECT` statement to produce a result set that is a series of `CREATE USER` and `CREATE GROUP` statements by including the appropriate text as a literal within the `SELECT` statement. + +There's no way to retrieve existing passwords, so you need to implement a scheme for allocating new initial passwords on Azure Synapse. + +#### Permissions + +> [!TIP] +> There are equivalent Azure Synapse permissions for basic database operations such as DML and DDL. + +In a Netezza system, the system table `_t_usrobj_priv` holds the access rights for users and roles. Query these tables (if the user has `SELECT` access to those tables) to obtain current lists of access rights defined within the system. + +In Netezza, the individual permissions are represented as individual bits within field privileges or g_privileges. See example SQL statement at [user group permissions](http://nz2nz.blogspot.com/2016/03/netezza-user-group-permissions-view_3.html) + +The simplest way to obtain a DDL script that contains the `GRANT` commands to replicate the current privileges for users and groups is to use the appropriate Netezza utility scripts: + +```sql +--List of group privileges +nz_ddl_grant_group -usrobj dbname > output_file_dbname; + +--List of user privileges +nz_ddl_grant_user -usrobj dbname > output_file_dbname; +``` + +The output file can be modified to produce a script that is a series of `GRANT` statements for Azure Synapse. + +Netezza supports two classes of access rights,—Admin and Object. See the following table for a list of Netezza access rights and their equivalent in Azure Synapse. + +| Admin Privilege | Description | Azure Synapse Equivalent | +|----------------------------|-------------|-----------------| +| Backup | Allows user to create backups. The user can run backups. The user can run the command `nzbackup`. | \* | +| [Create] Aggregate | Allows the user to create user-defined aggregates (UDAs). Permission to operate on existing UDAs is controlled by object privileges. | CREATE FUNCTION \*\*\* | +| [Create] Database | Allows the user to create databases. Permission to operate on existing databases is controlled by object privileges. | CREATE DATABASE | +| [Create] External Table | Allows the user to create external tables. Permission to operate on existing tables is controlled by object privileges. | CREATE TABLE | +| [Create] Function | Allows the user to create user-defined functions (UDFs). Permission to operate on existing UDFs is controlled by object privileges. | CREATE FUNCTION | +| [Create] Group | Allows the user to create groups. Permission to operate on existing groups is controlled by object privileges. | CREATE ROLE | +| [Create] Index | For system use only. Users can't create indexes. | CREATE INDEX | +| [Create] Library | Allows the user to create shared libraries. Permission to operate on existing shared libraries is controlled by object privileges. | \* | +| [Create] Materialized View | Allows the user to create materialized views. | CREATE VIEW | +| [Create] Procedure | Allows the user to create stored procedures. Permission to operate on existing stored procedures is controlled by object privileges. 
| CREATE PROCEDURE | +| [Create] Schema | Allows the user to create schemas. Permission to operate on existing schemas is controlled by object privileges. | CREATE SCHEMA | +| [Create] Sequence | Allows the user to create database sequences. | \* | +| [Create] Synonym | Allows the user to create synonyms. | CREATE SYNONYM | +| [Create] Table | Allows the user to create tables. Permission to operate on existing tables is controlled by object privileges. | CREATE TABLE | +| [Create] Temp Table | Allows the user to create temporary tables. Permission to operate on existing tables is controlled by object privileges. | CREATE TABLE | +| [Create] User | Allows the user to create users. Permission to operate on existing users is controlled by object privileges. | CREATE USER | +| [Create] View | Allows the user to create views. Permission to operate on existing views is controlled by object privileges. | CREATE VIEW | +| [Manage Hardware | Allows the user to do the following hardware-related operations: view hardware status, manage SPUs, manage topology and mirroring, and run diagnostic tests. The user can run these commands: nzhw and nzds. | \*\*\*\* | +| [Manage Security | Allows the user to run commands and operations that relate to the following advanced security options such as: managing and configuring history databases, managing multi- level security objects, and specifying security for users and groups, managing database key stores and keys and key stores for the digital signing of audit data. | \*\*\*\* | +| [Manage System | Allows the user to do the following management operations: start/stop/pause/resume the system, abort sessions, view the distribution map, system statistics, and logs. The user can use these commands: nzsystem, nzstate, nzstats, and nzsession. | \*\*\*\* | +| Restore | Allows the user to restore the system. The user can run the nzrestore command. | \*\* | +| Unfence | Allows the user to create or alter a user-defined function or aggregate to run in unfenced mode. | \* | + +| Object Privilege Abort | Description | Azure Synapse Equivalent | +|----------------------------|-------------|-----------------| +| Abort | Allows the user to abort sessions. Applies to groups and users. | KILL DATABASE CONNECTION | +| Alter | Allows the user to modify object attributes. Applies to all objects. | ALTER | +| Delete | Allows the user to delete table rows. Applies only to tables. | DELETE | +| Drop | Allows the user to drop objects. Applies to all object types. | DROP | +| Execute | Allows the user to run user-defined functions, user-defined aggregates, or stored procedures. | EXECUTE | +| GenStats | Allows the user to generate statistics on tables or databases. The user can run GENERATE STATISTICS command. | \*\* | +| Groom | Allows the user to reclaim disk space for deleted or outdated rows, and reorganize a table by the organizing keys, or to migrate data for tables that have multiple stored versions. | \*\* | +| Insert | Allows the user to insert rows into a table. Applies only to tables. | INSERT | +| List | Allows the user to display an object name, either in a list or in another manner. Applies to all objects. | LIST | +| Select | Allows the user to select (or query) rows within a table. Applies to tables and views. | SELECT | +| Truncate | Allows the user to delete all rows from a table. Applies only to tables. | TRUNCATE | +| Update | Allows the user to modify table rows. Applies to tables only. 
| UPDATE | + +Comments on the preceding table: + +\* There's no direct equivalent to this function in Azure Synapse. + +\*\* These Netezza functions are handled automatically in Azure Synapse. + +\*\*\* The Azure Synapse `CREATE FUNCTION` feature incorporates Netezza aggregate functionality. + +\*\*\*\* These features are managed automatically by the system or via Azure portal in Azure Synapse—see the next section on Operational considerations. + +Refer to [Azure Synapse Analytics security permissions](../../guidance/security-white-paper-introduction.md). + +## Operational considerations + +> [!TIP] +> Operational tasks are necessary to keep any data warehouse operating efficiently. + +This section discusses how to implement typical Netezza operational tasks in Azure Synapse with minimal risk and impact to users. + +As with all data warehouse products, once in production there are ongoing management tasks that are necessary to keep the system running efficiently and to provide data for monitoring and auditing. Resource utilization and capacity planning for future growth also falls into this category, as does backup/restore of data. + +Netezza administration tasks typically fall into two categories: + +- System administration, which is managing the hardware, configuration settings, system status, access, disk space, usage, upgrades, and other tasks. + +- Database administration, which is managing user databases and their content, loading data, backing up data, restoring data, and controlling access to data and permissions. + +IBM® Netezza® offers several ways or interfaces that you can use to perform the various system and database management tasks: + +- Netezza commands (nz* commands) are installed in the /nz/kit/bin directory on the Netezza host. For many of the nz* commands, you must be able to sign into the Netezza system to access and run those commands. In most cases, users sign in as the default nz user account, but you can create other Linux user accounts on your system. Some commands require you to specify a database user account, password, and database to ensure that you've permission to do the task. + +- The Netezza CLI client kits package a subset of the nz* commands that can be run from Windows and UNIX client systems. The client commands might also require you to specify a database user account, password, and database to ensure that you've database administrative and object permissions to perform the task. + +- The SQL commands support administration tasks and queries within a SQL database session. You can run the SQL commands from the Netezza nzsql command interpreter or through SQL APIs such as ODBC, JDBC, and the OLE DB Provider. You must have a database user account to run the SQL commands with appropriate permissions for the queries and tasks that you perform. + +- The NzAdmin tool is a Netezza interface that runs on Windows client workstations to manage Netezza systems. + +While conceptually the management and operations tasks for different data warehouses are similar, the individual implementations may differ. In general, modern cloud-based products such as Azure Synapse tend to incorporate a more automated and "system managed" approach (as opposed to a more 'manual' approach in legacy data warehouses such as Netezza). + +The following sections compare Netezza and Azure Synapse options for various operational tasks. + +### Housekeeping tasks + +> [!TIP] +> Housekeeping tasks keep a production warehouse operating efficiently and optimize use of resources such as storage. 
+ +In most legacy data warehouse environments, regular 'housekeeping' tasks are time-consuming. Reclaim disk storage space by removing old versions of updated or deleted rows or reorganizing data, log file or index blocks for efficiency (`GROOM` and `VACUUM` in Netezza). Collecting statistics is also a potentially time-consuming task, required after a bulk data ingest to provide the query optimizer with up-to-date data on which to base query execution plans. + +Netezza recommends collecting statistics as follows: + +- Collect statistics on unpopulated tables to set up the interval histogram used in internal processing. This initial collection makes subsequent statistics collections faster. Make sure to recollect statistics after data is added. + +- Prototype phase, newly populated tables. + +- Production phase, after a significant percentage of change to the table or partition (~10% rows). For high volumes of nonunique values, such as dates or timestamps, it may be advantageous to recollect at 7%. + +- Recommendation: Collect production phase statistics after you've created users and applied real world query loads to the database (up to about three months of querying). + +- Collect statistics in the first few weeks after an upgrade or migration during periods of low CPU utilization. + +Netezza Database contains many log tables in the Data Dictionary that accumulate data, either automatically or after certain features are enabled. Because log data grows over time, purge older information to avoid using up permanent space. There are options to automate the maintenance of these logs available. + +> [!TIP] +> Automate and monitor housekeeping tasks in Azure. + +Azure Synapse has an option to automatically create statistics so that they can be used as needed. Perform defragmentation of indexes and data blocks manually, on a scheduled basis, or automatically. Leveraging native built-in Azure capabilities can reduce the effort required in a migration exercise. + +### Monitoring and auditing + +> [!TIP] +> Netezza Performance Portal is the recommended method of monitoring and logging for Netezza systems. + +Netezza provides the Netezza Performance Portal to monitor various aspects of one or more Netezza systems including activity, performance, queuing, and resource utilization. Netezza Performance Portal is an interactive GUI which allows users to drill down into low-level details for any chart. + +> [!TIP] +> Azure Portal provides a GUI to manage monitoring and auditing tasks for all Azure data and processes. + +Similarly, Azure Synapse provides a rich monitoring experience within the Azure portal to provide insights into your data warehouse workload. The Azure portal is the recommended tool when monitoring your data warehouse as it provides configurable retention periods, alerts, recommendations, and customizable charts and dashboards for metrics and logs. + +The portal also enables integration with other Azure monitoring services such as Operations Management Suite (OMS) and Azure Monitor (logs) to provide a holistic monitoring experience for not only the data warehouse but also the entire Azure analytics platform for an integrated monitoring experience. + +> [!TIP] +> Low-level and system-wide metrics are automatically logged in Azure Synapse. +Resource utilization statistics for the Azure Synapse are automatically logged within the system. 
The metrics include usage statistics for CPU, memory, cache, I/O and temporary workspace for each query as well as connectivity information—such as failed connection attempts. + +Azure Synapse provides a set of [Dynamic management views](../../sql-data-warehouse/sql-data-warehouse-manage-monitor.md?msclkid=3e6eefbccfe211ec82d019ada29b1834) (DMVs). These views are useful when actively troubleshooting and identifying performance bottlenecks with your workload. + +For more information, see [Azure Synapse operations and management options](/azure/sql-data-warehouse/sql-data-warehouse-how-to-manage-and-monitor-workload-importance). + +### High Availability (HA) and Disaster Recovery (DR) + +Netezza appliances are redundant, fault-tolerant systems and there are diverse options in a Netezza system to enable high availability and disaster recovery. + +Adding IBM® Netezza Replication Services for disaster recovery improves fault tolerance by extending redundancy across local and wide area networks. + +IBM Netezza Replication Services protects against data loss by synchronizing data on a primary system (the primary node) with data on one or more target nodes (subordinates). These nodes make up a replication set. + +High-Availability Linux (also called *Linux-HA*) provides the failover capabilities from a primary or active Netezza host to a secondary or standby Netezza host. The main cluster management daemon in the Linux-HA solution is called *Heartbeat*. Heartbeat watches the hosts and manages the communication and status checks of services. + +Each service is a resource. + +Netezza groups the Netezza-specific services into the nps resource group. When Heartbeat detects problems that imply a host failure condition or loss of service to the Netezza users, Heartbeat can initiate a failover to the standby host. For details about Linux-HA and its terms and operations, see the documentation at [http://www.linux-ha.org](http://www.linux-ha.org/). + +Distributed Replicated Block Device (DRBD) is a block device driver that mirrors the content of block devices (hard disks, partitions, and logical volumes) between the hosts. Netezza uses the DRBD replication only on the **/nz** and **/export/home** partitions. As new data is written to the **/nz** partition and the **/export/home** partition on the primary host, the DRBD software automatically makes the same changes to the **/nz** and **/export/home** partition of the standby host. + +> [!TIP] +> Azure Synapse creates snapshots automatically to ensure fast recovery times. + +Azure Synapse uses database snapshots to provide high availability of the warehouse. A data warehouse snapshot creates a restore point that can be used to recover or copy a data warehouse to a previous state. Since Azure Synapse is a distributed system, a data warehouse snapshot consists of many files that are in Azure storage. Snapshots capture incremental changes from the data stored in your data warehouse. + +> [!TIP] +> Use user-defined snapshots to define a recovery point before key updates. + +> [!TIP] +> Microsoft Azure provides automatic backups to a separate geographical location to enable DR. + +Azure Synapse automatically takes snapshots throughout the day, creating restore points that are available for seven days. You can't change this retention period. Azure Synapse supports an eight-hour recovery point objective (RPO). A data warehouse can be restored in the primary region from any one of the snapshots taken in the past seven days. 
+
+User-defined restore points are also supported, allowing manual triggering of snapshots to create restore points of a data warehouse before and after large modifications. This capability ensures that restore points are logically consistent, which provides additional data protection in case of any workload interruptions or user errors, for a desired RPO of less than eight hours.
+
+As well as the snapshots described previously, Azure Synapse also performs a geo-backup once per day, as standard, to a [paired data center](/azure/best-practices-availability-paired-regions). The RPO for a geo-restore is 24 hours. You can restore the geo-backup to a server in any other region where Azure Synapse is supported. A geo-backup ensures that a data warehouse can be restored in case the restore points in the primary region aren't available.
+
+### Workload management
+
+> [!TIP]
+> In a production data warehouse, there are typically mixed workloads with different resource usage characteristics running concurrently.
+
+Netezza incorporates various features for managing workloads:
+
+| Technique | Description |
+|-----------|-------------|
+| **Scheduler rules** | Scheduler rules influence the scheduling of plans. Each scheduler rule specifies a condition or set of conditions. Each time the scheduler receives a plan, it evaluates all modifying scheduler rules and carries out the appropriate actions. Each time the scheduler selects a plan for execution, it evaluates all limiting scheduler rules. The plan is executed only if doing so wouldn't exceed a limit imposed by a limiting scheduler rule. Otherwise, the plan waits. This provides you with a way to classify and manipulate plans in a way that influences the other WLM techniques (SQB, GRA, and PQE). |
+| **Guaranteed resource allocation (GRA)** | You can assign a minimum share and a maximum percentage of total system resources to entities called *resource groups*. The scheduler ensures that each resource group receives system resources in proportion to its minimum share. A resource group receives a larger share of resources when other resource groups are idle, but never receives more than its configured maximum percentage. Each plan is associated with a resource group, and the settings of that resource group determine what fraction of available system resources are to be made available to process the plan. |
+| **Short query bias (SQB)** | Resources (that is, scheduling slots, memory, and preferential queuing) are reserved for short queries. A short query is a query for which the cost estimate is less than a specified maximum value (the default is two seconds). With SQB, short queries can run even when the system is busy processing other, longer queries. |
+| **Prioritized query execution (PQE)** | Based on settings that you configure, the system assigns a priority—critical, high, normal, or low—to each query. The priority depends on factors such as the user, group, or session associated with the query. The system can then use the priority as a basis for allocating resources. |
+
+In Azure Synapse, resource classes are pre-determined resource limits that govern compute resources and concurrency for query execution. Resource classes can help you manage your workload by setting limits on the number of queries that run concurrently and on the compute resources assigned to each query. There's a trade-off between memory and concurrency.
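+
+As a minimal sketch, assigning a resource class in a dedicated SQL pool is done by adding the user to the corresponding database role; the user name below is a placeholder:
+
+```sql
+-- Give a dedicated load user a larger static resource class so its queries
+-- get more memory, at the cost of reduced overall concurrency.
+EXEC sp_addrolemember 'staticrc40', 'load_user';
+
+-- To revert, remove the user from the resource class; queries then run under the default smallrc.
+-- EXEC sp_droprolemember 'staticrc40', 'load_user';
+```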
+ +See [Resource classes for workload management](/azure/sql-data-warehouse/resource-classes-for-workload-management) for detailed information. + +This information can also be used for capacity planning, determining the resources required for additional users or application workload. This also applies to planning scale up/scale downs of compute resources for cost-effective support of 'peaky' workloads. + +### Scale compute resources + +> [!TIP] +> A major benefit of Azure is the ability to independently scale up and down compute resources on demand to handle peaky workloads cost-effectively. + +The architecture of Azure Synapse separates storage and compute, allowing each to scale independently. As a result, [compute resources can be scaled](../../sql-data-warehouse/quickstart-scale-compute-portal.md) to meet performance demands independent of data storage. You can also pause and resume compute resources. A natural benefit of this architecture is that billing for compute and storage is separate. If a data warehouse isn't in use, save on compute costs by pausing compute. + +Compute resources can be scaled up or scaled back by adjusting the data warehouse units setting for the data warehouse. Loading and query performance will increase linearly as you add more data warehouse units. + +Adding more compute nodes adds more compute power and ability to leverage more parallel processing. As the number of compute nodes increases, the number of distributions per compute node decreases, providing more compute power and parallel processing for queries. Similarly, decreasing data warehouse units reduces the number of compute nodes, which reduces the compute resources for queries. + +## Next steps + +To learn more about visualization and reporting, see the next article in this series: [Visualization and reporting for Netezza migrations](4-visualization-reporting.md). \ No newline at end of file diff --git a/articles/synapse-analytics/migration-guides/netezza/4-visualization-reporting.md b/articles/synapse-analytics/migration-guides/netezza/4-visualization-reporting.md new file mode 100644 index 000000000000..ed31095736a9 --- /dev/null +++ b/articles/synapse-analytics/migration-guides/netezza/4-visualization-reporting.md @@ -0,0 +1,316 @@ +--- +title: "Visualization and reporting for Netezza migrations" +description: Learn about Microsoft and third-party BI tools for reports and visualizations in Azure Synapse compared to Netezza. +ms.service: synapse-analytics +ms.subservice: sql-dw +ms.custom: +ms.devlang: +ms.topic: conceptual +author: ajagadish-24 +ms.author: ajagadish +ms.reviewer: wiassaf +ms.date: 05/24/2022 +--- + +# Visualization and reporting for Netezza migrations + +This article is part four of a seven part series that provides guidance on how to migrate from Netezza to Azure Synapse Analytics. This article provides best practices for visualization and reporting. + +## Access Azure Synapse Analytics using Microsoft and third-party BI tools + +Almost every organization accesses data warehouses and data marts using a range of BI tools and applications, such as: + +- Microsoft BI tools, like Power BI. + +- Office applications, like Microsoft Excel spreadsheets. + +- Third-party BI tools from various vendors. + +- Custom analytic applications that have embedded BI tool functionality inside the application. 
+ +- Operational applications that request BI on demand, by invoking queries and reports as-a-service on a BI platform, which in turn queries data in the data warehouse or data marts that are being migrated. + +- Interactive data science development tools, such as Azure Synapse Spark Notebooks, Azure Machine Learning, RStudio, Jupyter notebooks. + +The migration of visualization and reporting as part of a data warehouse migration program means that all the existing queries, reports, and dashboards generated and issued by these tools and applications, need to run on Azure Synapse and yield the same results as they did in the original data warehouse prior to migration. + +> [!TIP] +> Existing users, user groups, roles and assignments of access security privileges need to be migrated first for migration of reports and visualizations to succeed. + +To make that happen, everything that BI tools and applications depend on needs to work once you migrate your data warehouse schema and data to Azure Synapse. That includes the obvious and the not so obvious—such as access and security. Access and security are important considerations for data access in the migrated system, and are specifically discussed in [another guide](3-security-access-operations.md) in this series. When you address access and security, ensure that: + +- Authentication is migrated to let users sign in to the data warehouse and data mart databases on Azure Synapse. + +- All users are migrated to Azure Synapse. + +- All user groups are migrated to Azure Synapse. + +- All roles are migrated to Azure Synapse. + +- All authorization privileges governing access control are migrated to Azure Synapse. + +- User, role, and privilege assignments are migrated to mirror what you had on your existing data warehouse before migration. For example: + - Database object privileges assigned to roles + - Roles assigned to user groups + - Users assigned to user groups and/or roles + +> [!TIP] +> Communication and business user involvement is critical to success. + +In addition, all the required data needs to be migrated to ensure the same results appear in the same reports and dashboards that now query data on Azure Synapse. User expectation will undoubtedly be that migration is seamless and there will be no surprises that destroy their confidence in the migrated system on Azure Synapse. So, this is an area where you must take extreme care and communicate as much as possible to allay any fears in your user base. Their expectations are that: + +- Table structure will be the same if directly referred to in queries + +- Table and column names remain the same if directly referred to in queries; for instance, so that calculated fields defined on columns in BI tools don't fail when aggregate reports are produced + +- Historical analysis remains the same + +- Data types should, if possible, remain the same + +- Query behavior remains the same + +- ODBC / JDBC drivers are tested to make sure nothing has changed in terms of query behavior + +> [!TIP] +> Views and SQL queries using proprietary SQL query extensions are likely to result in incompatibilities that impact BI reports and dashboards. + +If BI tools are querying views in the underlying data warehouse or data mart database, then will these views still work? You might think yes, but if there are proprietary SQL extensions, specific to your legacy data warehouse DBMS in these views that have no equivalent in Azure Synapse, you'll need to know about them and find a way to resolve them. 
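+
+As a purely hypothetical illustration, the following sketch shows a view written with a Netezza-style proprietary function and one possible Azure Synapse (T-SQL) rewrite. The view, table, and column names are invented for the example, and the right rewrite depends on the functions your views actually use.
+
+```sql
+-- Hypothetical legacy view using a proprietary date function such as AGE():
+--   CREATE VIEW v_customer_tenure AS
+--   SELECT customer_id, AGE(signup_date) AS tenure
+--   FROM customers;
+
+-- Possible Azure Synapse rewrite using standard T-SQL date functions.
+CREATE VIEW v_customer_tenure AS
+SELECT customer_id,
+       DATEDIFF(day, signup_date, GETDATE()) AS tenure_days
+FROM customers;
+```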
+ +Other issues like the behavior of nulls or data type variations across DBMS platforms need to be tested, in case they cause slightly different calculation results. Obviously, you want to minimize these issues and take all necessary steps to shield business users from any kind of impact. Depending on your legacy data warehouse system (such as Netezza), there are [tools](../../partner/data-integration.md) that can help hide these differences so that BI tools and applications are kept unaware of them and can run unchanged. + +> [!TIP] +> Use repeatable tests to ensure reports, dashboards, and other visualizations migrate successfully. + +Testing is critical to visualization and report migration. You need a test suite and agreed-on test data to run and rerun tests in both environments. A test harness is also useful, and a few are mentioned later in this guide. In addition, it's also important to have significant business involvement in this area of migration to keep confidence high and to keep them engaged and part of the project. + +Finally, you may also be thinking about switching BI tools. For example, you might want to [migrate to Power BI](/power-bi/guidance/powerbi-migration-overview). The temptation is to do all of this at the same time, while migrating your schema, data, ETL processing, and more. However, to minimize risk, it's better to migrate to Azure Synapse first and get everything working before undertaking further modernization. + +If your existing BI tools run on premises, ensure that they're able to connect to Azure Synapse through your firewall to run comparisons against both environments. Alternatively, if the vendor of your existing BI tools offers their product on Azure, you can try it there. The same applies for applications running on premises that embed BI or that call your BI server on-demand, requesting a "headless report" with data returned in XML or JSON, for example. + +There's a lot to think about here, so let's look at all this in more detail. + +> [!TIP] +> A lift and shift data warehouse migration are likely to minimize any disruption to reports, dashboards, and other visualizations. + +## Minimize the impact of data warehouse migration on BI tools and reports using data virtualization + +> [!TIP] +> Data virtualization allows you to shield business users from structural changes during migration so that they remain unaware of changes. + +The temptation during data warehouse migration to the cloud is to take the opportunity to make changes during the migration to fulfill long-term requirements, such as opening business requests, missing data, new features, and more. However, if you're going to do that, it can affect BI tool business users and applications accessing your data warehouse, especially if it involves structural changes in your data model. Even if there were no new data structures because of new requirements, but you're considering adopting a different data modeling technique (like Data Vault) in your migrated data warehouse, you're likely to cause structural changes that impact BI reports and dashboards. If you want to adopt an agile data modeling technique, do so after migration. One way in which you can minimize the impact of things like schema changes on BI tools, users, and the reports they produce, is to introduce data virtualization between BI tools and your data warehouse and data marts. The following diagram shows how data virtualization can hide the migration from users. 
+
+:::image type="content" source="../media/4-visualization-reporting/migration-data-virtualization.png" border="true" alt-text="Diagram showing how to hide the migration from users through data virtualization.":::
+
+This breaks the dependency between business users utilizing self-service BI tools and the physical schema of the underlying data warehouse and data marts that are being migrated.
+
+> [!TIP]
+> Schema alterations to tune your data model for Azure Synapse can be hidden from users.
+
+By introducing data virtualization, any schema alterations made during data warehouse and data mart migration to Azure Synapse (to optimize performance, for example) can be hidden from business users because they only access virtual tables in the data virtualization layer. If structural changes are needed, only the mappings between the data warehouse or data marts and the virtual tables would need to change, so that users remain unaware of those changes and unaware of the migration. [Microsoft partners](../../partner/data-integration.md) provide useful data virtualization software.
+
+## Identify high priority reports to migrate first
+
+A key question when migrating your existing reports and dashboards to Azure Synapse is which ones to migrate first. Several factors can drive the decision. For example:
+
+- Business value
+
+- Usage
+
+- Ease of migration
+
+- Data migration strategy
+
+These factors are discussed in more detail later in this article.
+
+Whatever the decision is, it must involve the business, since they produce the reports and dashboards, and consume the insights these artifacts provide in support of the decisions that are made around your business. That said, if most reports and dashboards can be migrated seamlessly, with minimal effort, and offer up like-for-like results, simply by pointing your BI tool(s) at Azure Synapse instead of your legacy data warehouse system, then everyone benefits. Therefore, if it's that straightforward and there's no reliance on legacy system proprietary SQL extensions, then the ease-of-migration factor described above breeds confidence.
+
+### Migrate reports based on usage
+
+Usage is interesting, since it's an indicator of business value. Reports and dashboards that are never used clearly aren't contributing to supporting any decisions and don't currently offer any value. So, do you have any mechanism for finding out which reports and dashboards are currently not used? Several BI tools provide statistics on usage, which would be an obvious place to start.
+
+If your legacy data warehouse has been up and running for many years, there's a high chance you could have hundreds, if not thousands, of reports in existence. In these situations, usage is an important indicator of the business value of a specific report or dashboard. In that sense, it's worth compiling an inventory of the reports and dashboards you have and defining their business purpose and usage statistics.
+
+For those that aren't used at all, it's an appropriate time to seek a business decision to determine whether it's necessary to de-commission those reports to optimize your migration efforts. A key question worth asking when deciding to de-commission unused reports is: are they unused because people don't know they exist, because they offer no business value, or because they've been superseded by others?
+
+### Migrate reports based on business value
+
+Usage on its own isn't a clear indicator of business value.
There needs to be a deeper business context to determine the value to the business. In an ideal world, we would like to know the contribution of the insights produced in a report to the bottom line of the business. That's exceedingly difficult to determine, since every decision made, and its dependency on the insights in a specific report, would need to be recorded along with the contribution that each decision makes to the bottom line of the business. You would also need to do this overtime. + +This level of detail is unlikely to be available in most organizations. One way in which you can get deeper on business value to drive migration order is to look at alignment with business strategy. A business strategy set by your executive typically lays out strategic business objectives, key performance indicators (KPIs), and KPI targets that need to be achieved and who is accountable for achieving them. In that sense, classifying your reports and dashboards by strategic business objectives—for example, reduce fraud, improve customer engagement, and optimize business operations—will help understand business purpose and show what objective(s), specific reports, and dashboards these are contributing to. Reports and dashboards associated with high priority objectives in the business strategy can then be highlighted so that migration is focused on delivering business value in a strategic high priority area. + +It's also worthwhile to classify reports and dashboards as operational, tactical, or strategic, to understand the level in the business where they're used. Delivering strategic business objectives contribution is required at all these levels. Knowing which reports and dashboards are used, at what level, and what objectives they're associated with, helps to focus migration on high priority business value that will drive the company forward. Business contribution of reports and dashboards is needed to understand this, perhaps like what is shown in the following **Business strategy objective** table. + +| **Level** | **Report / dashboard name** | **Business purpose** | **Department used** | **Usage frequency** | **Business priority** | +|-|-|-|-|-|-| +| **Strategic** | | | | | | +| **Tactical** | | | | | | +| **Operational** | | | | | | + +While this may seem too time consuming, you need a mechanism to understand the contribution of reports and dashboards to business value, whether you're migrating or not. Catalogs like Azure Data Catalog are becoming very important because they give you the ability to catalog reports and dashboards, automatically capture the metadata associated with them, and let business users tag and rate them to help you understand business value. + +### Migrate reports based on data migration strategy + +> [!TIP] +> Data migration strategy could also dictate which reports and visualizations get migrated first. + +If your migration strategy is based on migrating "data marts first", clearly, the order of data mart migration will have a bearing on which reports and dashboards can be migrated first to run on Azure Synapse. Again, this is likely to be a business-value-related decision. Prioritizing which data marts are migrated first reflects business priorities. Metadata discovery tools can help you here by showing you which reports rely on data in which data mart tables. 
+ +## Migration incompatibility issues that can impact reports and visualizations + +When it comes to migrating to Azure Synapse, there are several things that can impact the ease of migration for reports, dashboards, and other visualizations. The ease of migration is affected by: + +- Incompatibilities that occur during schema migration between your legacy data warehouse and Azure Synapse. + +- Incompatibilities in SQL between your legacy data warehouse and Azure Synapse. + +### The impact of schema incompatibilities + +> [!TIP] +> Schema incompatibilities include legacy warehouse DBMS table types and data types that are unsupported on Azure Synapse. + +BI tool reports and dashboards, and other visualizations, are produced by issuing SQL queries that access physical tables and/or views in your data warehouse or data mart. When it comes to migrating your data warehouse or data mart schema to Azure Synapse, there may be incompatibilities that can impact reports and dashboards, such as: + +- Non-standard table types supported in your legacy data warehouse DBMS that don't have an equivalent in Azure Synapse. + +- Data types supported in your legacy data warehouse DBMS that don't have an equivalent in Azure Synapse. + +In many cases, where there are incompatibilities, there may be ways around them. For example, the data in unsupported table types can be migrated into a standard table with appropriate data types and indexed or partitioned on a date/time column. Similarly, it may be able to represent unsupported data types in another type of column and perform calculations in Azure Synapse to achieve the same. Either way, it will need refactoring. + +> [!TIP] +> Querying the system catalog of your legacy warehouse DBMS is a quick and straightforward way to identify schema incompatibilities with Azure Synapse. + +To identify reports and visualizations impacted by schema incompatibilities, run queries against the system catalog of your legacy data warehouse to identify tables with unsupported data types. Then use metadata from your BI tool or tools to identify reports that access these structures, to see what could be impacted. Obviously, this will depend on the legacy data warehouse DBMS you're migrating from. Find details of how to identify these incompatibilities in [Design and performance for Netezza migrations](1-design-performance-migration.md). + +The impact may be less than you think, because many BI tools don't support such data types. As a result, views may already exist in your legacy data warehouse that `CAST` unsupported data types to more generic types. + +### The impact of SQL incompatibilities and differences + +Additionally, any report, dashboard, or other visualization in an application or tool that makes use of proprietary SQL extensions associated with your legacy data warehouse DBMS, is likely to be impacted when migrating to Azure Synapse. This could happen because the BI tool or application: + +- Accesses legacy data warehouse DBMS views that include proprietary SQL functions that have no equivalent in Azure Synapse. + +- Issues SQL queries, which include proprietary SQL functions peculiar to the SQL dialect of your legacy data warehouse DBMS, that have no equivalent in Azure Synapse. 
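+
+To scope the schema-level incompatibilities described earlier in this section before tackling SQL differences, a query along the following lines can be run against the Netezza catalog. It assumes the commonly used `_V_RELATION_COLUMN` view; verify the view and column names on your Netezza release, and adjust the list of flagged data types to match your own assessment.
+
+```sql
+-- Sketch: list columns whose Netezza data types have no direct Azure Synapse equivalent.
+-- The flagged types below are examples only; extend the list as needed.
+SELECT NAME AS TABLE_NAME,
+       ATTNAME AS COLUMN_NAME,
+       FORMAT_TYPE AS DATA_TYPE
+FROM _V_RELATION_COLUMN
+WHERE UPPER(FORMAT_TYPE) LIKE 'INTERVAL%'
+   OR UPPER(FORMAT_TYPE) LIKE 'TIME WITH TIME ZONE%'
+ORDER BY NAME, ATTNAME;
+```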
+ +### Gauge the impact of SQL incompatibilities on your reporting portfolio + +You can't rely on documentation associated with reports, dashboards, and other visualizations to gauge how big of an impact SQL incompatibility may have on the portfolio of embedded query services, reports, dashboards, and other visualizations you're intending to migrate to Azure Synapse. There must be a more precise way of doing that. + +#### Use EXPLAIN statements to find SQL incompatibilities + +> [!TIP] +> Gauge the impact of SQL incompatibilities by harvesting your DBMS log files and running `EXPLAIN` statements. + +One way is to get a hold of the SQL log files of your legacy data warehouse. Use a script to pull out a representative set of SQL statements into a file, prefix each SQL statement with an `EXPLAIN` statement, and then run all the `EXPLAIN` statements in Azure Synapse. Any SQL statements containing proprietary SQL extensions from your legacy data warehouse that are unsupported will be rejected by Azure Synapse when the `EXPLAIN` statements are executed. This approach would at least give you an idea of how significant or otherwise the use of incompatible SQL is. + +Metadata from your legacy data warehouse DBMS will also help you when it comes to views. Again, you can capture and view SQL statements, and `EXPLAIN` them as described previously to identify incompatible SQL in views. + +## Test report and dashboard migration to Azure Synapse Analytics + +> [!TIP] +> Test performance and tune to minimize compute costs. + +A key element in data warehouse migration is the testing of reports and dashboards against Azure Synapse to verify that the migration has worked. To do this, you need to define a series of tests and a set of required outcomes for each test that needs to be run to verify success. It's important to ensure that reports and dashboards are tested and compared across your existing and migrated data warehouse systems to: + +- Identify whether schema changes made during migration such as data types to be converted, have impacted reports in terms of ability to run, results, and corresponding visualizations. + +- Verify all users are migrated. + +- Verify all roles are migrated and users assigned to those roles. + +- Verify all data access security privileges are migrated to ensure access control list (ACL) migration. + +- Ensure consistent results of all known queries, reports, and dashboards. + +- Ensure that data and ETL migration is complete and error free. + +- Ensure data privacy is upheld. + +- Test performance and scalability. + +- Test analytical functionality. + +For information about how to migrate users, user groups, roles, and privileges, see the [Security, access, and operations for Netezza migrations](3-security-access-operations.md) which is part of this series of articles. + +> [!TIP] +> Build an automated test suite to make tests repeatable. + +It's also best practice to automate testing as much as possible, to make each test repeatable and to allow a consistent approach to evaluating results. This works well for known regular reports, and could be managed via [Synapse pipelines](../../get-started-pipelines.md?msclkid=8f3e7e96cfed11eca432022bc07c18de) or [Azure Data Factory](../../../data-factory/introduction.md?msclkid=2ccc66eccfde11ecaa58877e9d228779) orchestration. If you already have a suite of test queries in place for regression testing, you could use the testing tools to automate the post migration testing. 
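+
+As a minimal sketch of the `EXPLAIN` technique referred to above, assume the harvested log contains the following (hypothetical) report query. Prefixing it with `EXPLAIN` and replaying it against Azure Synapse surfaces unsupported syntax without actually executing the query:
+
+```sql
+-- Hypothetical query harvested from the legacy DBMS logs, replayed with EXPLAIN.
+-- If the statement relies on proprietary syntax with no Azure Synapse equivalent,
+-- the EXPLAIN fails and the error message identifies the offending construct.
+EXPLAIN
+SELECT region,
+       SUM(sales_amount) AS total_sales
+FROM fact_sales
+GROUP BY region;
+```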
+
+> [!TIP]
+> Leverage tools that can compare metadata lineage to verify results.
+
+Ad-hoc analysis and reporting are more challenging and require a set of tests to be compiled to verify that results are consistent across your legacy data warehouse DBMS and Azure Synapse. If reports and dashboards are inconsistent, then having the ability to compare metadata lineage across original and migrated systems is extremely valuable during migration testing, as it can highlight differences and pinpoint where they occurred when these aren't easy to detect. This is discussed in more detail later in this article.
+
+In terms of security, the best way to do this is to create roles, assign access privileges to roles, and then attach users to roles. To access your newly migrated data warehouse, set up an automated process to create new users and to do role assignment. To detach users from roles, you can follow the same steps.
+
+It's also important to communicate the cut-over to all users, so they know what's changing and what to expect.
+
+## Analyze lineage to understand dependencies between reports, dashboards, and data
+
+> [!TIP]
+> Having access to metadata and data lineage from reports all the way back to the data source is critical for verifying that migrated reports are working correctly.
+
+A critical success factor in migrating reports and dashboards is understanding lineage. Lineage is metadata that shows the journey that data has taken, so you can see the path from the report/dashboard all the way back to where the data originates. It shows how data has gone from point to point, its location in the data warehouse and/or data mart, and where it's used—for example, in what reports. It helps you understand what happens to data as it travels through different data stores—files and databases—and different ETL pipelines, and into reports. If business users have access to data lineage, it improves trust, breeds confidence, and enables more informed business decisions.
+
+> [!TIP]
+> Tools that automate metadata collection and show end-to-end lineage in a multi-vendor environment are valuable when it comes to migration.
+
+In multi-vendor data warehouse environments, business analysts in BI teams may map out data lineage. For example, if you have Informatica for your ETL, Oracle for your data warehouse, and Tableau for reporting, each of which has its own metadata repository, figuring out where a specific data element in a report came from can be challenging and time-consuming.
+
+To migrate seamlessly from a legacy data warehouse to Azure Synapse, end-to-end data lineage helps prove like-for-like migration when comparing reports and dashboards against your legacy environment. That means that metadata from several tools needs to be captured and integrated to show the end-to-end journey. Having access to tools that support automated metadata discovery and data lineage will let you see duplicate reports and ETL processes, and reports that rely on data sources that are obsolete, questionable, or even non-existent. With this information, you can reduce the number of reports and ETL processes that you migrate.
+
+You can also compare the end-to-end lineage of a report in Azure Synapse against the end-to-end lineage for the same report in your legacy data warehouse environment, to see if there are any differences that have occurred inadvertently during migration. This helps enormously with testing and verifying migration success.
+ +Data lineage visualization not only reduces time, effort, and error in the migration process, but also enables faster execution of the migration project. + +By leveraging automated metadata discovery and data lineage tools that can compare lineage, you can verify if a report is produced using data migrated to Azure Synapse and if it's produced in the same way as in your legacy environment. This kind of capability also helps you determine: + +- What data needs to be migrated to ensure successful report and dashboard execution on Azure Synapse + +- What transformations have been and should be performed to ensure successful execution on Azure Synapse + +- How to reduce report duplication + +This substantially simplifies the data migration process, because the business will have a better idea of the data assets it has and what needs to be migrated to enable a solid reporting environment on Azure Synapse. + +> [!TIP] +> Azure Data Factory and several third-party ETL tools support lineage. + +Several ETL tools provide end-to-end lineage capability, and you may be able to make use of this via your existing ETL tool if you're continuing to use it with Azure Synapse. Microsoft [Synapse pipelines](../../get-started-pipelines.md?msclkid=8f3e7e96cfed11eca432022bc07c18de) or [Azure Data Factory](../../../data-factory/introduction.md?msclkid=2ccc66eccfde11ecaa58877e9d228779) lets you view lineage in mapping flows. Also, [Microsoft partners](../../partner/data-integration.md) provide automated metadata discovery, data lineage, and lineage comparison tools. + +## Migrate BI tool semantic layers to Azure Synapse Analytics + +> [!TIP] +> Some BI tools have semantic layers that simplify business user access to physical data structures in your data warehouse or data mart, like SAP Business Objects and IBM Cognos. + +Some BI tools have what is known as a semantic metadata layer. The role of this metadata layer is to simplify business user access to physical data structures in an underlying data warehouse or data mart database. It does this by providing high-level objects like dimensions, measures, hierarchies, calculated metrics, and joins. These objects use business terms familiar to business analysts and are mapped to the physical data structures in the data warehouse or data mart database. + +When it comes to data warehouse migration, changes to column names or table names may be forced upon you. For example, in Oracle, table names can have a "#". In Azure Synapse, the "#" is only allowed as a prefix to a table name to indicate a temporary table. Therefore, you may need to change a table name if migrating from Oracle. You may need to do rework to change mappings in such cases. + +A good way to get everything consistent across multiple BI tools is to create a universal semantic layer, using common data names for high-level objects like dimensions, measures, hierarchies, and joins, in a data virtualization server (as shown in the next diagram) that sits between applications, BI tools, and Azure Synapse. This allows you to set up everything once (instead of in every tool), including calculated fields, joins and mappings, and then point all BI tools at the data virtualization server. + +> [!TIP] +> Use data virtualization to create a common semantic layer to guarantee consistency across all BI tools in an Azure Synapse environment. 
+
+In this way, you get consistency across all BI tools, while at the same time breaking the dependency between BI tools and applications, and the underlying physical data structures in Azure Synapse. Use [Microsoft partners](../../partner/data-integration.md) on Azure to implement this. The following diagram shows how a common vocabulary in the Data Virtualization server lets multiple BI tools see a common semantic layer.
+
+:::image type="content" source="../media/4-visualization-reporting/data-virtualization-semantics.png" border="true" alt-text="Diagram with common data names and definitions that relate to the data virtualization server.":::
+
+## Conclusions
+
+> [!TIP]
+> Identify incompatibilities early to gauge the extent of the migration effort. Migrate your users, user groups, roles, and privilege assignments. Only migrate the reports and visualizations that are used and are contributing to business value.
+
+In a lift-and-shift data warehouse migration to Azure Synapse, most reports and dashboards should migrate easily.
+
+However, issues can arise if data structures change, if data is stored in unsupported data types, or if access to data in the data warehouse or data mart is via a view that includes proprietary SQL that's unsupported in your Azure Synapse environment. You'll need to deal with those issues if they arise.
+
+You can't rely on documentation to find out where the issues are likely to be. Making use of `EXPLAIN` statements is a pragmatic and quick way to identify incompatibilities in SQL. Rework the incompatible statements to achieve similar results in Azure Synapse. In addition, it's recommended that you make use of automated metadata discovery and lineage tools to help you identify duplicate reports and reports that are no longer valid because they use data sources you no longer use, and to understand dependencies. Some of these tools help compare lineage to verify that reports running in your legacy data warehouse environment are produced identically in Azure Synapse.
+
+Don't migrate reports that you no longer use. BI tool usage data can help determine which ones aren't in use. For the visualizations and reports that you do want to migrate, migrate all users, user groups, roles, and privileges, and associate these reports with strategic business objectives and priorities to help you identify how each report's insights contribute to specific objectives. This is useful if you're using business value to drive your report migration strategy. If you're migrating by data store—data mart by data mart—then metadata will also help you identify which reports are dependent on which tables and views, so that you can focus on migrating these first.
+
+Finally, consider data virtualization to shield BI tools and applications from structural changes to the data warehouse and/or the data mart data model that may occur during migration. You can also use a common vocabulary with data virtualization to define a common semantic layer that guarantees consistent common data names, definitions, metrics, hierarchies, joins, and more across all BI tools and applications in a migrated Azure Synapse environment.
+
+## Next steps
+
+To learn more about minimizing SQL issues, see the next article in this series: [Minimizing SQL issues for Netezza migrations](5-minimize-sql-issues.md).
\ No newline at end of file
diff --git a/articles/synapse-analytics/migration-guides/netezza/5-minimize-sql-issues.md b/articles/synapse-analytics/migration-guides/netezza/5-minimize-sql-issues.md
new file mode 100644
index 000000000000..4489c9aa40db
--- /dev/null
+++ b/articles/synapse-analytics/migration-guides/netezza/5-minimize-sql-issues.md
@@ -0,0 +1,268 @@
+---
+title: "Minimize SQL issues for Netezza migrations"
+description: Learn how to minimize the risk of SQL issues when migrating from Netezza to Azure Synapse.
+ms.service: synapse-analytics
+ms.subservice: sql-dw
+ms.custom:
+ms.devlang:
+ms.topic: conceptual
+author: ajagadish-24
+ms.author: ajagadish
+ms.reviewer: wiassaf
+ms.date: 05/24/2022
+---
+
+# Minimize SQL issues for Netezza migrations
+
+This article is part five of a seven-part series that provides guidance on how to migrate from Netezza to Azure Synapse Analytics. This article provides best practices for minimizing SQL issues.
+
+## Overview
+
+### Characteristics of Netezza environments
+
+> [!TIP]
+> Netezza pioneered the "data warehouse appliance" concept in the early 2000s.
+
+In 2003, Netezza initially released its data warehouse appliance product. It reduced the cost of entry and improved the ease of use of massively parallel processing (MPP) techniques to enable data processing at scale more efficiently than the existing mainframe or other MPP technologies available at the time. Since then, the product has evolved and has many installations among large financial institutions, telecommunications, and retail companies. The original implementation used proprietary hardware including field programmable gate arrays—or FPGAs—and was accessible via ODBC or JDBC network connection over TCP/IP.
+
+Most existing Netezza installations are on-premises, so many users are considering migrating some or all of their Netezza data to Azure Synapse to gain the benefits of a move to a modern cloud environment.
+
+> [!TIP]
+> Many existing Netezza installations are data warehouses using a dimensional data model.
+
+Netezza technology is often used to implement a data warehouse, supporting complex analytic queries on large data volumes using SQL. Dimensional data models—star or snowflake schemas—are common, as is the implementation of data marts for individual departments.
+
+This combination of SQL and dimensional data models simplifies migration to Azure Synapse, since the basic concepts and SQL skills are transferable. The recommended approach is to migrate the existing data model as-is to reduce risk and time taken. Even if the eventual intention is to make changes to the data model (for example, moving to a Data Vault model), perform an initial as-is migration and then make changes within the Azure cloud environment, leveraging the performance, elastic scalability, and cost advantages there.
+
+While the SQL language has been standardized, individual vendors have in some cases implemented proprietary extensions. This article highlights potential SQL differences you may encounter while migrating from a legacy Netezza environment and provides workarounds.
+
+### Use Azure Data Factory to implement a metadata-driven migration
+
+> [!TIP]
+> Automate the migration process by using Azure Data Factory capabilities.
+
+Automate and orchestrate the migration process by making use of the capabilities in the Azure environment. This approach also minimizes the migration's impact on the existing Netezza environment, which may already be running close to full capacity.
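+
+One way to make the migration metadata-driven, sketched below under assumed names, is to hold the list of tables to migrate in a simple control table that a pipeline can iterate over. The `dbo.migration_control` table and its columns are illustrative assumptions; in Azure Data Factory or Synapse pipelines, a Lookup activity could read this table and a ForEach activity could copy each listed table in priority order.
+
+```sql
+-- Minimal sketch of a metadata control table for a migration pipeline.
+-- All names and columns are illustrative assumptions.
+CREATE TABLE dbo.migration_control
+(
+    source_schema VARCHAR(128) NOT NULL,
+    source_table  VARCHAR(128) NOT NULL,
+    target_schema VARCHAR(128) NOT NULL,
+    target_table  VARCHAR(128) NOT NULL,
+    load_priority INT NOT NULL,
+    migrated_flag BIT NOT NULL
+);
+
+-- Register the tables to migrate, in priority order.
+INSERT INTO dbo.migration_control VALUES ('ADMIN', 'FACT_SALES', 'dbo', 'FactSales', 1, 0);
+INSERT INTO dbo.migration_control VALUES ('ADMIN', 'DIM_CUSTOMER', 'dbo', 'DimCustomer', 2, 0);
+```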
+ +Azure Data Factory is a cloud-based data integration service that allows creation of data-driven workflows in the cloud for orchestrating and automating data movement and data transformation. Using Data Factory, you can create and schedule data-driven workflows—called pipelines—that can ingest data from disparate data stores. It can process and transform data by using compute services such as Azure HDInsight Hadoop, Spark, Azure Data Lake Analytics, and Azure Machine Learning. + +By creating metadata to list the data tables to be migrated and their location, you can use the Data Factory facilities to manage and automate parts of the migration process. You can also use [Synapse pipelines](../../get-started-pipelines.md?msclkid=8f3e7e96cfed11eca432022bc07c18de). + +## SQL DDL differences between Netezza and Azure Synapse + +### SQL Data Definition Language (DDL) + +> [!TIP] +> SQL DDL commands `CREATE TABLE` and `CREATE VIEW` have standard core elements but are also used to define implementation-specific options. + +The ANSI SQL standard defines the basic syntax for DDL commands such as `CREATE TABLE` and `CREATE VIEW`. These commands are used within both Netezza and Azure Synapse, but they've also been extended to allow definition of implementation-specific features such as indexing, table distribution and partitioning options. + +The following sections discuss Netezza-specific options to consider during a migration to Azure Synapse. + +### Table considerations + +> [!TIP] +> Use existing indexes to give an indication of candidates for indexing in the migrated warehouse. + +When migrating tables between different technologies, only the raw data and its descriptive metadata gets physically moved between the two environments. Other database elements from the source system, such as indexes and log files, aren't directly migrated as these may not be needed or may be implemented differently within the new target environment. For example, the `TEMPORARY` option within Netezza's `CREATE TABLE` syntax is equivalent to prefixing the table name with a "#" character in Azure Synapse. + +It's important to understand where performance optimizations—such as indexes—were used in the source environment. This indicates where performance optimization can be added in the new target environment. For example, if zone maps were created in the source Netezza environment, this might indicate that a non-clustered index should be created in the migrated Azure Synapse. Other native performance optimization techniques, such as table replication, may be more applicable than a straight 'like for like' index creation. + +### Unsupported Netezza database object types + +> [!TIP] +> Netezza-specific features can be replaced by Azure Synapse features. + +Netezza implements some database objects that aren't directly supported in Azure Synapse, but there are methods to achieve the same functionality within the new environment: + +- Zone Maps—In Netezza, zone maps are automatically created and maintained for some column types and are used at query time to restrict the amount of data to be scanned. Zone Maps are created on the following column types: + - `INTEGER` columns of length 8 bytes or less. + - Temporal columns. For instance, `DATE`, `TIME`, and `TIMESTAMP`. + - `CHAR` columns, if these are part of a materialized view and mentioned in the `ORDER BY` clause. + + You can find out which columns have zone maps by using the `nz_zonemap` utility, which is part of the NZ Toolkit. 
Azure Synapse doesn't include zone maps, but you can achieve similar results by using other user-defined index types and/or partitioning.
+
+- Clustered Base tables (CBT)—In Netezza, CBTs are commonly used for fact tables, which can have billions of records. Scanning such a huge table requires a lot of processing time, since a full table scan might be needed to get relevant records. Organizing records on restrictive CBT columns allows Netezza to group records in the same or nearby extents. This process also creates zone maps that improve performance by reducing the amount of data to be scanned.
+
+  In Azure Synapse, you can achieve a similar effect by use of partitioning and/or other indexes.
+
+- Materialized views—Netezza supports materialized views and recommends creating one or more of these over large tables having many columns where only a few of those columns are regularly used in queries. The system automatically maintains materialized views when data in the base table is updated.
+
+  Azure Synapse supports materialized views, with the same functionality as Netezza.
+
+### Netezza data type mapping
+
+> [!TIP]
+> Assess the impact of unsupported data types as part of the preparation phase.
+
+Most Netezza data types have a direct equivalent in Azure Synapse. The following table shows these data types along with the recommended approach for mapping them.
+
+| Netezza Data Type | Azure Synapse Data Type |
+|--------------------------------|-------------------------------------|
+| BIGINT | BIGINT |
+| BINARY VARYING(n) | VARBINARY(n) |
+| BOOLEAN | BIT |
+| BYTEINT | TINYINT |
+| CHARACTER VARYING(n) | VARCHAR(n) |
+| CHARACTER(n) | CHAR(n) |
+| DATE | DATE |
+| DECIMAL(p,s) | DECIMAL(p,s) |
+| DOUBLE PRECISION | FLOAT |
+| FLOAT(n) | FLOAT(n) |
+| INTEGER | INT |
+| INTERVAL | INTERVAL data types aren't currently directly supported in Azure Synapse but can be calculated using temporal functions such as DATEDIFF |
+| MONEY | MONEY |
+| NATIONAL CHARACTER VARYING(n) | NVARCHAR(n) |
+| NATIONAL CHARACTER(n) | NCHAR(n) |
+| NUMERIC(p,s) | NUMERIC(p,s) |
+| REAL | REAL |
+| SMALLINT | SMALLINT |
+| ST_GEOMETRY(n) | Spatial data types such as ST_GEOMETRY aren't currently supported in Azure Synapse, but the data could be stored as VARCHAR or VARBINARY |
+| TIME | TIME |
+| TIME WITH TIME ZONE | DATETIMEOFFSET |
+| TIMESTAMP | DATETIME |
+
+### Data Definition Language (DDL) generation
+
+> [!TIP]
+> Use existing Netezza metadata to automate the generation of `CREATE TABLE` and `CREATE VIEW` DDL for Azure Synapse.
+
+Edit existing Netezza `CREATE TABLE` and `CREATE VIEW` scripts to create the equivalent definitions, with modified data types as described previously if necessary. Typically, this involves removing or modifying any extra Netezza-specific clauses such as `ORGANIZE ON`.
+
+However, all the information that specifies the current definitions of tables and views within the existing Netezza environment is maintained within system catalog tables. This is the best source of this information as it's guaranteed to be up to date and complete. Be aware that user-maintained documentation may not be in sync with the current table definitions.
+
+Access this information by using utilities such as `nz_ddl_table` and generate the `CREATE TABLE` DDL statements. Edit these statements for the equivalent tables in Azure Synapse.
+
+> [!TIP]
+> Third-party tools and services can automate data mapping tasks.
+ +There are [Microsoft partners](../../partner/data-integration.md) who offer tools and services to automate migration, including data-type mapping. Also, if a third-party ETL tool such as Informatica or Talend is already in use in the Netezza environment, that tool can implement any required data transformations. + +## SQL DML differences between Netezza and Azure Synapse + +### SQL Data Manipulation Language (DML) + +> [!TIP] +> SQL DML commands `SELECT`, `INSERT` and `UPDATE` have standard core elements but may also implement different syntax options. + +The ANSI SQL standard defines the basic syntax for DML commands such as `SELECT`, `INSERT`, `UPDATE` and `DELETE`. Both Netezza and Azure Synapse use these commands, but in some cases there are implementation differences. + +The following sections discuss the Netezza-specific DML commands that you should consider during a migration to Azure Synapse. + +### SQL DML syntax differences + +Be aware of these differences in SQL Data Manipulation Language (DML) syntax between Netezza SQL and Azure Synapse when migrating: + +- `STRPOS`: In Netezza, the `STRPOS` function returns the position of a substring within a string. The equivalent function in Azure Synapse is `CHARINDEX`, with the order of the arguments reversed. For example, `SELECT STRPOS('abcdef','def')...` in Netezza is equivalent to `SELECT CHARINDEX('def','abcdef')...` in Azure Synapse. + +- `AGE`: Netezza supports the `AGE` operator to give the interval between two temporal values, such as timestamps or dates. For example, `SELECT AGE('23-03-1956','01-01-2019') FROM...`. In Azure Synapse, `DATEDIFF` gives the interval. For example, `SELECT DATEDIFF(day, '1956-03-26','2019-01-01') FROM...`. Note the date representation sequence. + +- `NOW()`: Netezza uses `NOW()` to represent `CURRENT_TIMESTAMP` in Azure Synapse. + +### Functions, stored procedures, and sequences + +> [!TIP] +> As part of the preparation phase, assess the number and type of non-data objects being migrated. + +When migrating from a mature legacy data warehouse environment such as Netezza, there are often elements other than simple tables and views that need to be migrated to the new target environment. Examples of this include functions, stored procedures, and sequences. + +As part of the preparation phase, create an inventory of the objects that need to be migrated and define the methods for handling them. Then assign an appropriate allocation of resources in the project plan. + +There may be facilities in the Azure environment that replace the functionality implemented as either functions or stored procedures in the Netezza environment. In this case, it's often more efficient to use the built-in Azure facilities rather than recoding the Netezza functions. + +> [!TIP] +> Third-party products and services can automate migration of non-data elements. + +[Microsoft partners](../../partner/data-integration.md) offer tools and services that can automate the migration, including the mapping of data types. Also, third-party ETL tools, such as Informatica or Talend, that are already in use in the IBM Netezza environment can implement any required data transformations. + +See the following sections for more information on each of these elements. + +#### Functions + +As with most database products, Netezza supports system functions and user-defined functions within the SQL implementation. 
When migrating to another database platform such as Azure Synapse, common system functions are available and can be migrated without change. Some system functions may have slightly different syntax, but the required changes can be automated. Functions with no equivalent, such as arbitrary user-defined functions, may need to be recoded using the languages available in the target environment. Azure Synapse uses the popular Transact-SQL language to implement user-defined functions. Netezza user-defined functions are coded in nzlua or C++ languages.
+
+#### Stored procedures
+
+Most modern database products allow for procedures to be stored within the database. Netezza provides the NZPLSQL language, which is based on Postgres PL/pgSQL. A stored procedure typically contains SQL statements and some procedural logic, and may return data or a status.
+
+Azure Synapse also supports stored procedures using T-SQL, so if you must migrate stored procedures, recode them accordingly.
+
+#### Sequences
+
+In Netezza, a sequence is a named database object created via `CREATE SEQUENCE` that provides a unique value via the `NEXT VALUE FOR` method. Use sequences to generate unique numbers for use as surrogate key or primary key values.
+
+In Azure Synapse, there's no `CREATE SEQUENCE`. Sequences are handled by using [Identity to create surrogate keys](../../sql-data-warehouse/sql-data-warehouse-tables-identity.md) or [managed identity](../../../data-factory/data-factory-service-identity.md?tabs=data-factory), with SQL code to create the next sequence number in a series.
+
+### Use [EXPLAIN](/sql/t-sql/queries/explain-transact-sql?msclkid=91233fc1cff011ec9dff597671b7ae97) to validate legacy SQL
+
+> [!TIP]
+> Find potential migration issues by using real queries from the existing system query logs.
+
+Capture some representative SQL statements from the legacy query history logs to evaluate legacy Netezza SQL for compatibility with Azure Synapse. Then prefix those queries with `EXPLAIN` and—assuming a 'like for like' migrated data model in Azure Synapse with the same table and column names—run those `EXPLAIN` statements in Azure Synapse. Any incompatible SQL will return an error. Use this information to determine the scale of the recoding task. This approach doesn't require data to be loaded into the Azure environment, only that the relevant tables and views have been created.
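+
+As a minimal sketch of this technique, the following statement wraps a captured legacy query in `EXPLAIN` so that it can be checked against Azure Synapse. The `dbo.fact_sales` table and its columns are illustrative assumptions, standing in for whatever objects your migrated data model defines.
+
+```sql
+-- Hypothetical example: validate a captured legacy query against Azure Synapse.
+-- The table and column names are illustrative assumptions.
+EXPLAIN
+SELECT store_id,
+       SUM(sales_amount) AS total_sales
+FROM dbo.fact_sales
+WHERE sale_date >= '2022-01-01'
+GROUP BY store_id;
+```
+
+If a captured query contains an unsupported proprietary extension, such as Netezza's `AGE` function, the `EXPLAIN` statement returns an error rather than a query plan, which flags that statement for recoding.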
+ +#### IBM Netezza to T-SQL mapping + +The IBM Netezza to T-SQL compliant with Azure Synapse SQL data type mapping is in this table: + +| IBM Netezza Data Type | Azure Synapse SQL Data Type | +|------------------------------------------------------|-----------------------------| +| array    | *Not supported* | +| bigint  | bigint | +| binary large object \[(n\[K\|M\|G\])\] | nvarchar \[(n\|max)\] | +| blob \[(n\[K\|M\|G\])\]  | nvarchar \[(n\|max)\] | +| byte \[(n)\] | binary \[(n)\]\|varbinary(max) | +| byteint    | smallint | +| char varying \[(n)\] | varchar \[(n\|max)\] | +| character varying \[(n)\] | varchar \[(n\|max)\] | +| char \[(n)\] | char \[(n)\]\|varchar(max) | +| character \[(n)\] | char \[(n)\]\|varchar(max) | +| character large object \[(n\[K\|M\|G\])\] | varchar \[(n\|max) | +| clob \[(n\[K\|M\|G\])\] | varchar \[(n\|max) | +| dataset    | *Not supported* | +| date  | date | +| dec \[(p\[,s\])\]    | decimal \[(p\[,s\])\] | +| decimal \[(p\[,s\])\]    | decimal \[(p\[,s\])\] | +| double precision    | float(53) | +| float \[(n)\]    | float \[(n)\] | +| graphic \[(n)\] | nchar \[(n)\]\| varchar(max) | +| interval  | *Not supported* | +| json \[(n)\]  | nvarchar \[(n\|max)\] | +| long varchar  | nvarchar(max) | +| long vargraphic  | nvarchar(max) | +| mbb  | *Not supported* | +| mbr  | *Not supported* | +| number \[((p\|\*)\[,s\])\]  | numeric \[(p\[,s\])\]  | +| numeric \[(p \[,s\])\]  | numeric \[(p\[,s\])\]  | +| period  | *Not supported* | +| real  | real | +| smallint  | smallint | +| st_geometry    | *Not supported* | +| time  | time | +| time with time zone  | datetimeoffset | +| timestamp  | datetime2  | +| timestamp with time zone  | datetimeoffset | +| varbyte  | varbinary \[(n\|max)\] | +| varchar \[(n)\] | varchar \[(n)\] | +| vargraphic \[(n)\] | nvarchar \[(n\|max)\] | +| varray  | *Not supported* | +| xml  | *Not supported* | +| xmltype  | *Not supported* | + +## Summary + +Typical existing legacy Netezza installations are implemented in a way that makes migration to Azure Synapse easy. They use SQL for analytical queries on large data volumes, and are in some form of dimensional data model. These factors make it a good candidate for migration to Azure Synapse. + +To minimize the task of migrating the actual SQL code, follow these recommendations: + +- Initial migration of the data warehouse should be as-is to minimize risk and time taken, even if the eventual final environment will incorporate a different data model such as Data Vault. + +- Understand the differences between Netezza SQL implementation and Azure Synapse. + +- Use metadata and query logs from the existing Netezza implementation to assess the impact of the differences and plan an approach to mitigate. + +- Automate the process wherever possible to minimize errors, risk, and time for the migration. + +- Consider using specialist [Microsoft partners](../../partner/data-integration.md) and services to streamline the migration. + +## Next steps + +To learn more about Microsoft and third-party tools, see the next article in this series: [Tools for Netezza data warehouse migration to Azure Synapse Analytics](6-microsoft-third-party-migration-tools.md). 
\ No newline at end of file diff --git a/articles/synapse-analytics/migration-guides/netezza/6-microsoft-third-party-migration-tools.md b/articles/synapse-analytics/migration-guides/netezza/6-microsoft-third-party-migration-tools.md new file mode 100644 index 000000000000..2400aa593c64 --- /dev/null +++ b/articles/synapse-analytics/migration-guides/netezza/6-microsoft-third-party-migration-tools.md @@ -0,0 +1,132 @@ +--- +title: "Tools for Netezza data warehouse migration to Azure Synapse Analytics" +description: Learn about Microsoft and third-party data and database migration tools that can help you migrate from Netezza to Azure Synapse. +ms.service: synapse-analytics +ms.subservice: sql-dw +ms.custom: +ms.devlang: +ms.topic: conceptual +author: ajagadish-24 +ms.author: ajagadish +ms.reviewer: wiassaf +ms.date: 05/24/2022 +--- + +# Tools for Netezza data warehouse migration to Azure Synapse Analytics + +This article is part six of a seven part series that provides guidance on how to migrate from Netezza to Azure Synapse Analytics. This article provides best practices for Microsoft and third-party tools. + +## Data warehouse migration tools + +By migrating your existing data warehouse to Azure Synapse, you benefit from: + +- A globally secure, scalable, low-cost, cloud-native, pay-as-you-use analytical database. + +- The rich Microsoft analytical ecosystem that exists on Azure. This ecosystem consists of technologies to help modernize your data warehouse once it's migrated, and extends your analytical capabilities to drive new value. + +Several tools from Microsoft and third-party partner vendors can help you migrate your existing data warehouse to Azure Synapse. These tools include: + +- Microsoft data and database migration tools. + +- Third-party data warehouse automation tools to automate and document the migration to Azure Synapse. + +- Third-party data warehouse migration tools to migrate schema and data to Azure Synapse. + +- Third-party tools to minimize the impact on SQL differences between your existing data warehouse DBMS and Azure Synapse. + +The following sections discuss these tools in more detail. + +## Microsoft data migration tools + +> [!TIP] +> Data Factory includes tools to help migrate your data and your entire data warehouse to Azure. + +Microsoft offers several tools to help you migrate your existing data warehouse to Azure Synapse, such as: + +- Microsoft Azure Data Factory. + +- Microsoft services for physical data transfer. + +- Microsoft services for data ingestion. + +### Microsoft Azure Data Factory + +Microsoft Azure Data Factory is a fully managed, pay-as-you-use, hybrid data integration service for highly scalable ETL and ELT processing. It uses Spark to process and analyze data in parallel and in memory to maximize throughput. + +> [!TIP] +> Data Factory allows you to build scalable data integration pipelines code-free. + +[Azure Data Factory connectors](../../../data-factory/connector-overview.md?msclkid=00086e4acff211ec9263dee5c7eb6e69) connect to external data sources and databases and have templates for common data integration tasks. A visual front-end, browser-based UI enables non-programmers to create and run process pipelines to ingest, transform, and load data. More experienced programmers have the option to incorporate custom code, such as Python programs. + +> [!TIP] +> Data Factory enables collaborative development between business and IT professionals. + +Data Factory is also an orchestration tool. 
It's the best Microsoft tool to automate the end-to-end migration process to reduce risk and make the migration process easily repeatable. The following diagram shows a Data Factory mapping data flow. + +:::image type="content" source="../media/6-microsoft-3rd-party-migration-tools/azure-data-factory-mapping-dataflows.png" border="true" alt-text="Screenshot showing an example of an Azure Data Factory mapping dataflow." lightbox="../media/6-microsoft-3rd-party-migration-tools/azure-data-factory-mapping-dataflows-lrg.png"::: + +The next screenshot shows a Data Factory wrangling data flow. + +:::image type="content" source="../media/6-microsoft-3rd-party-migration-tools/azure-data-factory-wrangling-dataflows.png" border="true" alt-text="Screenshot showing an example of Azure Data Factory wrangling dataflows."::: + +You can develop simple or comprehensive ETL and ELT processes without coding or maintenance with a few clicks. These processes ingest, move, prepare, transform, and process your data. You can design and manage scheduling and triggers in Azure Data Factory to build an automated data integration and loading environment. In Data Factory, you can define, manage, and schedule PolyBase bulk data load processes. + +> [!TIP] +> Data Factory includes tools to help migrate your data and your entire data warehouse to Azure. + +You can use Data Factory to implement and manage a hybrid environment that includes on-premises, cloud, streaming and SaaS data—for example, from applications like Salesforce—in a secure and consistent way. + +A new capability in Data Factory is wrangling data flows. This opens up Data Factory to business users who want to visually discover, explore, and prepare data at scale without writing code. This capability, similar to Microsoft Excel Power Query or Microsoft Power BI Dataflows, offers self-service data preparation. Business users can prepare and integrate data through a spreadsheet style user interface with drop-down transform options. + +Azure Data Factory is the recommended approach for implementing data integration and ETL/ELT processes for an Azure Synapse environment, especially if existing legacy processes need to be refactored. + +### Microsoft services for physical data transfer + +> [!TIP] +> Microsoft offers a range of products and services to assist with data transfer. + +#### Azure ExpressRoute + +Azure ExpressRoute creates private connections between Azure data centers and infrastructure on your premises or in a collocation environment. ExpressRoute connections don't go over the public Internet, and they offer more reliability, faster speeds, and lower latencies than typical internet connections. In some cases, by using ExpressRoute connections to transfer data between on-premises systems and Azure, you gain significant cost benefits. + +#### AzCopy + +[AzCopy](../../../storage/common/storage-use-azcopy-v10.md) is a command line utility that copies files to Azure Blob Storage via a standard internet connection. In a warehouse migration project, you can use AzCopy to upload extracted, compressed, and delimited text files before loading through PolyBase, or a native Parquet reader if the exported files are Parquet format. AzCopy can upload individual files, file selections, or file directories. + +#### Azure Data Box + +Microsoft offers a service called Azure Data Box. This service writes data to be migrated to a physical storage device. This device is then shipped to an Azure data center and loaded into cloud storage. 
The service can be cost-effective for large volumes of data—for example, tens or hundreds of terabytes—or where network bandwidth isn't readily available. Azure Data Box is typically used for one-off historical data load when migrating a large amount of data to Azure Synapse. + +Another service is Data Box Gateway, a virtualized cloud storage gateway device that resides on your premises and sends your images, media, and other data to Azure. Use Data Box Gateway for one-off migration tasks or ongoing incremental data uploads. + +### Microsoft services for data ingestion + +#### COPY INTO + +The [COPY](/sql/t-sql/statements/copy-into-transact-sql) statement provides the most flexibility for high-throughput data ingestion into Azure Synapse Analytics. Refer to the list of capabilities that `COPY` offers for data ingestion. + +#### PolyBase + +> [!TIP] +> PolyBase can load data in parallel from Azure Blob Storage into Azure Synapse. + +PolyBase provides the fastest and most scalable method of loading bulk data into Azure Synapse. PolyBase leverages the MPP architecture to use parallel loading, to give the fastest throughput, and can read data from flat files in Azure Blob Storage or directly from external data sources and other relational databases via connectors. + +PolyBase can also directly read from files compressed with gzip—this reduces the physical volume of data moved during the load process. PolyBase supports popular data formats such as delimited text, ORC and Parquet. + +> [!TIP] +> Invoke PolyBase from Azure Data Factory as part of a migration pipeline. + +PolyBase is tightly integrated with Azure Data Factory to enable data load ETL/ELT processes to be rapidly developed and scheduled through a visual GUI, leading to higher productivity and fewer errors than hand-written code. + +PolyBase is the recommended data load method for Azure Synapse, especially for high-volume data. PolyBase loads data using the `CREATE TABLE AS` or `INSERT...SELECT` statements—CTAS achieves the highest possible throughput as it minimizes the amount of logging required. Compressed delimited text files are the most efficient input format. For maximum throughput, split very large input files into multiple smaller files and load these in parallel. For fastest loading to a staging table, define the target table as type `HEAP` and use round-robin distribution. + +However, PolyBase has some limitations. Rows to be loaded must be less than 1 MB in length. Fixed-width format or nested data, such as JSON and XML, aren't directly readable. + +## Microsoft partners can help you migrate your data warehouse to Azure Synapse Analytics + +In addition to tools that can help you with various aspects of data warehouse migration, there are several practiced [Microsoft partners](../../partner/data-integration.md) that can bring their expertise to help you move your legacy on-premises data warehouse platform to Azure Synapse. + +## Next steps + +To learn more about implementing modern data warehouses, see the next article in this series: [Beyond Netezza migration, implementing a modern data warehouse in Microsoft Azure](7-beyond-data-warehouse-migration.md). 
\ No newline at end of file diff --git a/articles/synapse-analytics/migration-guides/netezza/7-beyond-data-warehouse-migration.md b/articles/synapse-analytics/migration-guides/netezza/7-beyond-data-warehouse-migration.md new file mode 100644 index 000000000000..d4b3f9b8e7de --- /dev/null +++ b/articles/synapse-analytics/migration-guides/netezza/7-beyond-data-warehouse-migration.md @@ -0,0 +1,375 @@ +--- +title: "Beyond Netezza migration, implementing a modern data warehouse in Microsoft Azure" +description: Learn how a Netezza migration to Azure Synapse lets you integrate your data warehouse with the Microsoft Azure analytical ecosystem. +ms.service: synapse-analytics +ms.subservice: sql-dw +ms.custom: +ms.devlang: +ms.topic: conceptual +author: ajagadish-24 +ms.author: ajagadish +ms.reviewer: wiassaf +ms.date: 05/24/2022 +--- + +# Beyond Netezza migration, implementing a modern data warehouse in Microsoft Azure + +This article is part seven of a seven part series that provides guidance on how to migrate from Netezza to Azure Synapse Analytics. This article provides best practices for implementing modern data warehouses. + +## Beyond data warehouse migration to Azure + +One of the key reasons to migrate your existing data warehouse to Azure Synapse is to utilize a globally secure, scalable, low-cost, cloud-native, pay-as-you-use analytical database. Azure Synapse also lets you integrate your migrated data warehouse with the complete Microsoft Azure analytical ecosystem to take advantage of, and integrate with, other Microsoft technologies that help you modernize your migrated data warehouse. This includes integrating with technologies like: + +- Azure Data Lake Storage—for cost effective data ingestion, staging, cleansing and transformation to free up data warehouse capacity occupied by fast growing staging tables + +- Azure Data Factory—for collaborative IT and self-service data integration [with connectors](../../../data-factory/connector-overview.md) to cloud and on-premises data sources and streaming data + +- [The Open Data Model Common Data Initiative](/common-data-model/)—to share consistent trusted data across multiple technologies including: + - Azure Synapse + - Azure Synapse Spark + - Azure HDInsight + - Power BI + - SAP + - Adobe Customer Experience Platform + - Azure IoT + - Microsoft ISV Partners + +- [Microsoft's data science technologies](/azure/architecture/data-science-process/platforms-and-tools) including: + - Azure ML studio + - Azure Machine Learning Service + - Azure Synapse Spark (Spark as a service) + - Jupyter Notebooks + - RStudio + - ML.NET + - Visual Studio .NET for Apache Spark to enable data scientists to use Azure Synapse data to train machine learning models at scale. + +- [Azure HDInsight](../../../hdinsight/index.yml)—to leverage big data analytical processing and join big data with Azure Synapse data by creating a Logical Data Warehouse using PolyBase + +- [Azure Event Hubs](../../../event-hubs/event-hubs-about.md), [Azure Stream Analytics](../../../stream-analytics/stream-analytics-introduction.md) and [Apache Kafka](/azure/databricks/spark/latest/structured-streaming/kafka)—to integrate with live streaming data from within Azure Synapse + +There's often acute demand to integrate with [Machine Learning](../../machine-learning/what-is-machine-learning.md) to enable custom built, trained machine learning models for use in Azure Synapse. This would enable in-database analytics to run at scale in-batch, on an event-driven basis and on-demand. 
The ability to exploit in-database analytics in Azure Synapse from multiple BI tools and applications also guarantees that all get the same predictions and recommendations.
+
+In addition, there's an opportunity to integrate Azure Synapse with Microsoft partner tools on Azure to shorten time to value.
+
+Let's look at these in more detail to understand how you can take advantage of the technologies in Microsoft's analytical ecosystem to modernize your data warehouse once you've migrated to Azure Synapse.
+
+## Offload data staging and ETL processing to Azure Data Lake and Azure Data Factory
+
+Enterprises today have a key problem resulting from digital transformation. So much new data is being generated and captured for analysis, and much of this data is finding its way into data warehouses. A good example is transaction data created by opening online transaction processing (OLTP) systems to self-service access from mobile devices. These OLTP systems are the main sources of data to a data warehouse, and with customers now driving the transaction rate rather than employees, data in data warehouse staging tables has been growing rapidly in volume.
+
+The rapid influx of data into the enterprise, along with new sources of data like Internet of Things (IoT), means that companies need to find a way to deal with unprecedented data growth and scale data integration ETL processing beyond current levels. One way to do this is to offload ingestion, data cleansing, transformation and integration to a data lake and process it at scale there, as part of a data warehouse modernization program.
+
+Once you've migrated your data warehouse to Azure Synapse, Microsoft provides the ability to modernize your ETL processing by ingesting data into, and staging data in, Azure Data Lake Storage. You can then clean, transform and integrate your data at scale using Data Factory before loading it into Azure Synapse in parallel using PolyBase.
+
+For ELT strategies, consider offloading ELT processing to Azure Data Lake to easily scale as your data volume or frequency grows.
+
+### Microsoft Azure Data Factory
+
+> [!TIP]
+> Data Factory allows you to build scalable data integration pipelines code-free.
+
+[Microsoft Azure Data Factory](https://azure.microsoft.com/services/data-factory/) is a pay-as-you-use, hybrid data integration service for highly scalable ETL and ELT processing. Data Factory provides a simple web-based user interface to build data integration pipelines, in a code-free manner that can:
+
+- Easily acquire data at scale. Pay only for what you use and connect to on-premises, cloud, and SaaS-based data sources.
+
+- Ingest, move, clean, transform, integrate, and analyze cloud and on-premises data at scale and take automatic action, such as a recommendation or an alert.
+
+- Seamlessly author, monitor and manage pipelines that span data stores both on-premises and in the cloud.
+
+- Enable pay-as-you-go scale out in alignment with customer growth.
+
+> [!TIP]
+> Data Factory can connect to on-premises, cloud, and SaaS data.
+
+All of this can be done without writing any code. However, adding custom code to Data Factory pipelines is also supported. The next screenshot shows an example Data Factory pipeline.
+ +:::image type="content" source="../media/7-beyond-data-warehouse-migration/azure-data-factory-pipeline.png" border="true" alt-text="Screenshot showing an example of an Azure Data Factory pipeline."::: + +> [!TIP] +> Pipelines called data factories control the integration and analysis of data. Data Factory is enterprise class data integration software aimed at IT professionals with a data wrangling facility for business users. + +Implement Data Factory pipeline development from any of several places including: + +- Microsoft Azure portal + +- Microsoft Azure PowerShell + +- Programmatically from .NET and Python using a multi-language SDK + +- Azure Resource Manager (ARM) Templates + +- REST APIs + +Developers and data scientists who prefer to write code can easily author Data Factory pipelines in Java, Python, and .NET using the software development kits (SDKs) available for those programming languages. Data Factory pipelines can also be hybrid as they can connect, ingest, clean, transform and analyze data in on-premises data centers, Microsoft Azure, other clouds, and SaaS offerings. + +Once you develop Data Factory pipelines to integrate and analyze data, deploy those pipelines globally and schedule them to run in batch, invoke them on demand as a service, or run them in real time on an event-driven basis. A Data Factory pipeline can also run on one or more execution engines and monitor pipeline execution to ensure performance and track errors. + +#### Use cases + +> [!TIP] +> Build data warehouses on Microsoft Azure. + +Data Factory can support multiple use cases, including: + +- Preparing, integrating, and enriching data from cloud and on-premises data sources to populate your migrated data warehouse and data marts on Microsoft Azure Synapse. + +- Preparing, integrating, and enriching data from cloud and on-premises data sources to produce training data for use in machine learning model development and in retraining analytical models. + +- Orchestrating data preparation and analytics to create predictive and prescriptive analytical pipelines for processing and analyzing data in batch, such as sentiment analytics, and either acting on the results of the analysis or populating your data warehouse with the results. + +- Preparing, integrating, and enriching data for data-driven business applications running on the Azure cloud on top of operational data stores like Azure Cosmos DB. + +> [!TIP] +> Build training data sets in data science to develop machine learning models. + +#### Data sources + +Azure Data Factory lets you use [connectors](../../../data-factory/connector-overview.md) from both cloud and on-premises data sources. Agent software, known as a *self-hosted integration runtime*, securely accesses on-premises data sources and supports secure, scalable data transfer. + +#### Transform data using Azure Data Factory + +> [!TIP] +> Professional ETL developers can use Azure Data Factory mapping data flows to clean, transform and integrate data without the need to write code. + +Within a Data Factory pipeline, ingest, clean, transform, integrate, and, if necessary, analyze any type of data from these sources. This includes structured, semi-structured—such as JSON or Avro—and unstructured data. + +Professional ETL developers can use Data Factory mapping data flows to filter, split, join (many types), lookup, pivot, unpivot, sort, union, and aggregate data without writing any code. 
In addition, Data Factory supports surrogate keys, multiple write processing options such as insert, upsert, update, table recreation, and table truncation, and several types of target data stores—also known as sinks. ETL developers can also create aggregations, including time series aggregations that require a window to be placed on data columns. + +> [!TIP] +> Data Factory supports the ability to automatically detect and manage schema changes in inbound data, such as in streaming data. + +Run mapping data flows that transform data as activities in a Data Factory pipeline. Include multiple mapping data flows in a single pipeline, if necessary. Break up challenging data transformation and integration tasks into smaller mapping dataflows that can be combined to handle the complexity and custom code added if necessary. In addition to this functionality, Data Factory mapping data flows include these abilities: + +- Define expressions to clean and transform data, compute aggregations, and enrich data. For example, these expressions can perform feature engineering on a date field to break it into multiple fields to create training data during machine learning model development. Construct expressions from a rich set of functions that include mathematical, temporal, split, merge, string concatenation, conditions, pattern match, replace, and many other functions. + +- Automatically handle schema drift so that data transformation pipelines can avoid being impacted by schema changes in data sources. This is especially important for streaming IoT data, where schema changes can happen without notice when devices are upgraded or when readings are missed by gateway devices collecting IoT data. + +- Partition data to enable transformations to run in parallel at scale. + +- Inspect data to view the metadata of a stream you're transforming. + +> [!TIP] +> Data Factory can also partition data to enable ETL processing to run at scale. + +The next screenshot shows an example Data Factory mapping data flow. + +:::image type="content" source="../media/6-microsoft-3rd-party-migration-tools/azure-data-factory-mapping-dataflows.png" border="true" alt-text="Screenshot showing an example of an Azure Data Factory mapping dataflow." lightbox="../media/6-microsoft-3rd-party-migration-tools/azure-data-factory-mapping-dataflows-lrg.png"::: + +Data engineers can profile data quality and view the results of individual data transforms by switching on a debug capability during development. + +> [!TIP] +> Data Factory pipelines are also extensible since Data Factory allows you to write your own code and run it as part of a pipeline. + +Extend Data Factory transformational and analytical functionality by adding a linked service containing your own code into a pipeline. For example, an Azure Synapse Spark Pool notebook containing Python code could use a trained model to score the data integrated by a mapping data flow. + +Store integrated data and any results from analytics included in a Data Factory pipeline in one or more data stores such as Azure Data Lake storage, Azure Synapse, or Azure HDInsight (Hive Tables). Invoke other activities to act on insights produced by a Data Factory analytical pipeline. + +#### Utilize Spark to scale data integration + +Under the covers, Data Factory utilizes Azure Synapse Spark Pools—Microsoft's Spark-as-a-service offering—at run time to clean and integrate data on the Microsoft Azure cloud. 
This enables it to clean, integrate, and analyze high-volume and very high-velocity data (such as click stream data) at scale. Microsoft intends to execute Data Factory pipelines on other Spark distributions. In addition to executing ETL jobs on Spark, Data Factory can also invoke Pig scripts and Hive queries to access and transform data stored in Azure HDInsight. + +#### Link self-service data prep and Data Factory ETL processing using wrangling data flows + +> [!TIP] +> Data Factory support for wrangling data flows in addition to mapping data flows means that business and IT can work together on a common platform to integrate data. + +Another new capability in Data Factory is wrangling data flows. This lets business users (also known as citizen data integrators and data engineers) make use of the platform to visually discover, explore and prepare data at scale without writing code. This easy-to-use Data Factory capability is similar to Microsoft Excel Power Query or Microsoft Power BI Dataflows, where self-service data preparation business users use a spreadsheet-style UI with drop-down transforms to prepare and integrate data. The following screenshot shows an example Data Factory wrangling data flow. + +:::image type="content" source="../media/6-microsoft-3rd-party-migration-tools/azure-data-factory-wrangling-dataflows.png" border="true" alt-text="Screenshot showing an example of Azure Data Factory wrangling dataflows."::: + +This differs from Excel and Power BI, as Data Factory wrangling data flows uses Power Query Online to generate M code and translate it into a massively parallel in-memory Spark job for cloud scale execution. The combination of mapping data flows and wrangling data flows in Data Factory lets IT professional ETL developers and business users collaborate to prepare, integrate, and analyze data for a common business purpose. The preceding Data Factory mapping data flow diagram shows how both Data Factory and Azure Synapse Spark Pool Notebooks can be combined in the same Data Factory pipeline. This allows IT and business to be aware of what each has created. Mapping data flows and wrangling data flows can then be available for reuse to maximize productivity and consistency and minimize reinvention. + +#### Link data and analytics in analytical pipelines + +In addition to cleaning and transforming data, Azure Data Factory can combine data integration and analytics in the same pipeline. Use Data Factory to create both data integration and analytical pipelines—the latter being an extension of the former. Drop an analytical model into a pipeline so that clean, integrated data can be stored to provide predictions or recommendations. Act on this information immediately or store it in your data warehouse to provide you with new insights and recommendations that can be viewed in BI tools. + +Models developed code-free with Azure ML Studio or with the Azure Machine Learning Service SDK using Azure Synapse Spark Pool Notebooks or using R in RStudio can be invoked as a service from within a Data Factory pipeline to batch score your data. Analysis happens at scale by executing Spark machine learning pipelines on Azure Synapse Spark Pool Notebooks. + +Store integrated data and any results from analytics included in a Data Factory pipeline in one or more data stores, such as Azure Data Lake storage, Azure Synapse, or Azure HDInsight (Hive Tables). Invoke other activities to act on insights produced by a Data Factory analytical pipeline. 
+
+## A lake database to share consistent trusted data
+
+> [!TIP]
+> Microsoft has created a lake database to describe core data entities to be shared across the enterprise.
+
+A key objective in any data integration setup is the ability to integrate data once and reuse it everywhere, not just in a data warehouse—for example, in data science. Reuse avoids reinvention and ensures consistent, commonly understood data that everyone can trust.
+
+> [!TIP]
+> Azure Data Lake is shared storage that underpins Microsoft Azure Synapse, Azure ML, Azure Synapse Spark, and Azure HDInsight.
+
+To achieve this goal, establish a set of common data names and definitions describing logical data entities that need to be shared across the enterprise—such as customer, account, product, supplier, orders, payments, returns, and so forth. Once this is done, IT and business professionals can use data integration software to create these common data assets and store them to maximize their reuse to drive consistency everywhere.
+
+> [!TIP]
+> Integrating data to create lake database logical entities in shared storage enables maximum reuse of common data assets.
+
+Microsoft has done this by creating a [lake database](../../database-designer/concepts-lake-database.md). The lake database is a common language for business entities that represents commonly used concepts and activities across a business. Azure Synapse Analytics provides industry-specific database templates to help standardize data in the lake. [Lake database templates](../../database-designer/concepts-database-templates.md) provide schemas for predefined business areas, enabling data to be loaded into a lake database in a structured way. The power comes when data integration software is used to create lake database common data assets. This results in self-describing trusted data that can be consumed by applications and analytical systems. Create a lake database in Azure Data Lake storage using Azure Data Factory, and consume it with Power BI, Azure Synapse Spark, Azure Synapse and Azure ML. The following diagram shows a lake database used in Azure Synapse Analytics.
+
+:::image type="content" source="../media/7-beyond-data-warehouse-migration/azure-synapse-analytics-lake-database.png" border="true" alt-text="Screenshot showing how a lake database can be used in Azure Synapse Analytics.":::
+
+## Integration with Microsoft data science technologies on Azure
+
+Another key requirement in modernizing your migrated data warehouse is to integrate it with Microsoft and third-party data science technologies on Azure to produce insights for competitive advantage. Let's look at what Microsoft offers in terms of machine learning and data science technologies and see how these can be used with Azure Synapse in a modern data warehouse environment.
+
+### Microsoft technologies for data science on Azure
+
+> [!TIP]
+> Develop machine learning models using a no-code/low-code approach or from a range of programming languages like Python, R and .NET.
+
+Microsoft offers a range of technologies to build predictive analytical models using machine learning, analyze unstructured data using deep learning, and perform other kinds of advanced analytics.
This includes: + +- Azure ML Studio + +- Azure Machine Learning Service + +- Azure Synapse Spark Pool Notebooks + +- ML.NET (API, CLI or .NET Model Builder for Visual Studio) + +- Visual Studio .NET for Apache Spark + +Data scientists can use RStudio (R) and Jupyter Notebooks (Python) to develop analytical models, or they can use other frameworks such as Keras or TensorFlow. + +#### Azure ML Studio + +Azure ML Studio is a fully managed cloud service that lets you easily build, deploy, and share predictive analytics via a drag-and-drop web-based user interface. The next screenshot shows an Azure Machine Learning studio user interface. + +:::image type="content" source="../media/7-beyond-data-warehouse-migration/azure-ml-studio-ui.png" border="true" alt-text="Screenshot showing predictive analysis in the Azure Machine Learning studio user interface."::: + +#### Azure Machine Learning Service + +> [!TIP] +> Azure Machine Learning Service provides an SDK for developing machine learning models using several open-source frameworks. + +Azure Machine Learning Service provides a software development kit (SDK) and services for Python to quickly prepare data, as well as train and deploy machine learning models. Use Azure Machine Learning Service from Azure notebooks (a Jupyter notebook service) and utilize open-source frameworks, such as PyTorch, TensorFlow, Spark MLlib (Azure Synapse Spark Pool Notebooks), or scikit-learn. Azure Machine Learning Service provides an AutoML capability that automatically identifies the most accurate algorithms to expedite model development. You can also use it to build machine learning pipelines that manage end-to-end workflow, programmatically scale on the cloud, and deploy models both to the cloud and the edge. Azure Machine Learning Service uses logical containers called workspaces, which can be either created manually from the Azure portal or created programmatically. These workspaces keep compute targets, experiments, data stores, trained machine learning models, docker images, and deployed services all in one place to enable teams to work together. Use Azure Machine Learning Service from Visual Studio with a Visual Studio for AI extension. + +> [!TIP] +> Organize and manage related data stores, experiments, trained models, docker images and deployed services in workspaces. + +#### Azure Synapse Spark Pool Notebooks + +> [!TIP] +> Azure Synapse Spark is Microsoft's dynamically scalable Spark-as-a-service offering scalable execution of data preparation, model development and deployed model execution. + +[Azure Synapse Spark Pool Notebooks](../../spark/apache-spark-development-using-notebooks.md?msclkid=cbe4b8ebcff511eca068920ea4bf16b9) is an Apache Spark service optimized to run on Azure which: + +- Allows data engineers to build and execute scalable data preparation jobs using Azure Data Factory + +- Allows data scientists to build and execute machine learning models at scale using notebooks written in languages such as Scala, R, Python, Java, and SQL; and to visualize results + +> [!TIP] +> Azure Synapse Spark can access data in a range of Microsoft analytical ecosystem data stores on Azure. + +Jobs running in Azure Synapse Spark Pool Notebook can retrieve, process, and analyze data at scale from Azure Blob Storage, Azure Data Lake Storage, Azure Synapse, Azure HDInsight, and streaming data services such as Kafka. + +Autoscaling and auto-termination are also supported to reduce total cost of ownership (TCO). 
Data scientists can use the ML flow open-source framework to manage the machine learning lifecycle. + +#### ML.NET + +> [!TIP] +> Microsoft has extended its machine learning capability to .NET developers. + +ML.NET is an open-source and cross-platform machine learning framework (Windows, Linux, macOS), created by Microsoft for .NET developers so that they can use existing tools—like .NET Model Builder for Visual Studio—to develop custom machine learning models and integrate them into .NET applications. + +#### Visual Studio .NET for Apache Spark + +Visual Studio .NET for Apache® Spark™ aims to make Spark accessible to .NET developers across all Spark APIs. It takes Spark support beyond R, Scala, Python, and Java to .NET. While initially only available on Apache Spark on HDInsight, Microsoft intends to make this available on Azure Synapse Spark Pool Notebook. + +### Utilize Azure Analytics with your data warehouse + +> [!TIP] +> Train, test, evaluate, and execute machine learning models at scale on Azure Synapse Spark Pool Notebook using data in your Azure Synapse. + +Combine machine learning models built using the tools with Azure Synapse by: + +- Using machine learning models in batch mode or in real time to produce new insights, and add them to what you already know in Azure Synapse. + +- Using the data in Azure Synapse to develop and train new predictive models for deployment elsewhere, such as in other applications. + +- Deploying machine learning models—including those trained elsewhere—in Azure Synapse to analyze data in the data warehouse and drive new business value. + +> [!TIP] +> Produce new insights using machine learning on Azure in batch or in real-time and add to what you know in your data warehouse. + +In terms of machine learning model development, data scientists can use RStudio, Jupyter notebooks, and Azure Synapse Spark Pool notebooks together with Microsoft Azure Machine Learning Service to develop machine learning models that run at scale on Azure Synapse Spark Pool Notebooks using data in Azure Synapse. For example, they could create an unsupervised model to segment customers for use in driving different marketing campaigns. Use supervised machine learning to train a model to predict a specific outcome, such as predicting a customer's propensity to churn, or recommending the next best offer for a customer to try to increase their value. The next diagram shows how Azure Synapse Analytics can be leveraged for Machine Learning. + +:::image type="content" source="../media/7-beyond-data-warehouse-migration/azure-synapse-train-predict.png" border="true" alt-text="Screenshot of an Azure Synapse Analytics train and predict model."::: + +In addition, you can ingest big data—such as social network data or review website data—into Azure Data Lake, then prepare and analyze it at scale on Azure Synapse Spark Pool Notebook, using natural language processing to score sentiment about your products or your brand. Add these scores to your data warehouse to understand the impact of—for example—negative sentiment on product sales, and to leverage big data analytics to add to what you already know in your data warehouse. + +## Integrate live streaming data into Azure Synapse Analytics + +When analyzing data in a modern data warehouse, you must be able to analyze streaming data in real time and join it with historical data in your data warehouse. An example of this would be combining IoT data with product or asset data. 
+ +> [!TIP] +> Integrate your data warehouse with streaming data from IoT devices or clickstream. + +Once you've successfully migrated your data warehouse to Azure Synapse, you can introduce this capability as part of a data warehouse modernization exercise. Do this by taking advantage of additional functionality in Azure Synapse. + +> [!TIP] +> Ingest streaming data into Azure Data Lake Storage from Azure Event Hubs or Kafka, and access it from Azure Synapse using PolyBase external tables. + +To do this, ingest streaming data via Azure Event Hubs or other technologies, such as Kafka, using Azure Data Factory (or using an existing ETL tool if it supports the streaming data sources) and land it in Azure Data Lake Storage (ADLS). Next, create an external table in Azure Synapse using PolyBase and point it at the data being streamed into Azure Data Lake. Your migrated data warehouse will now contain new tables that provide access to real-time streaming data. Query this external table as if the data were in the data warehouse via standard T-SQL from any BI tool that has access to Azure Synapse. You can also join this data to other tables containing historical data and create views that join live streaming data to historical data to make it easier for business users to access. In the following diagram, a real-time data warehouse on Azure Synapse Analytics is integrated with streaming data in Azure Data Lake. + +:::image type="content" source="../media/7-beyond-data-warehouse-migration/azure-datalake-streaming-data.png" border="true" alt-text="Screenshot of Azure Synapse Analytics with streaming data in an Azure Data Lake."::: + +## Create a logical data warehouse using PolyBase + +> [!TIP] +> PolyBase gives business users simplified access to multiple underlying analytical data stores on Azure. + +PolyBase offers the capability to create a logical data warehouse to simplify user access to multiple analytical data stores. + +This is attractive because many companies have adopted 'workload optimized' analytical data stores over the last several years in addition to their data warehouses. Examples of these platforms on Azure include: + +- Azure Data Lake Storage with Azure Synapse Spark Pool Notebook (Spark-as-a-service), for big data analytics + +- Azure HDInsight (Hadoop as-a-service), also for big data analytics + +- NoSQL Graph databases for graph analysis, which could be done in Azure Cosmos DB + +- Azure Event Hubs and Azure Stream Analytics, for real-time analysis of data in motion + +You may have non-Microsoft equivalents of some of these. You may also have a master data management (MDM) system that needs to be accessed for consistent trusted data on customers, suppliers, products, assets, and more. + +These additional analytical platforms have emerged because of the explosion of new data sources—both inside and outside the enterprise—that business users want to capture and analyze. Examples include: + +- Machine-generated data, such as IoT sensor data and clickstream data. + +- Human-generated data, such as social network data, review website data, customer inbound email, image, and video. + +- Other external data, such as open government data and weather data. + +This data is over and above the structured transaction data and master data sources that typically feed data warehouses. These new data sources include semi-structured data (like JSON, XML, or Avro) or unstructured data (like text, voice, image, or video), which is more complex to process and analyze.
This data could be very high volume, high velocity, or both. + +As a result, the need for new kinds of more complex analysis has emerged, such as natural language processing, graph analysis, deep learning, streaming analytics, or complex analysis of large volumes of structured data. This type of analysis typically doesn't happen in a data warehouse, so it's not surprising to see different analytical platforms for different types of analytical workloads, as shown in this diagram. + +:::image type="content" source="../media/7-beyond-data-warehouse-migration/analytical-workload-platforms.png" border="true" alt-text="Screenshot of different analytical platforms for different types of analytical workloads in Azure Synapse Analytics."::: + +Since these platforms are producing new insights, it's normal to see a requirement to combine these insights with what you already know in Azure Synapse. That's what PolyBase makes possible. + +> [!TIP] +> The ability to make data in multiple analytical data stores look like it's all in one system and join it to Azure Synapse is known as a logical data warehouse architecture. + +By leveraging PolyBase data virtualization inside Azure Synapse, you can implement a logical data warehouse. Join data in Azure Synapse to data in other Azure and on-premises analytical data stores—like Azure HDInsight or Cosmos DB—or to streaming data flowing into Azure Data Lake Storage from Azure Stream Analytics and Event Hubs. Users access external tables in Azure Synapse, unaware that the data they're accessing is stored in multiple underlying analytical systems. The next diagram shows the complex data warehouse structure accessed through comparatively simpler but still powerful user interface methods. + +:::image type="content" source="../media/7-beyond-data-warehouse-migration/complex-data-warehouse-structure.png" alt-text="Screenshot showing an example of a complex data warehouse structure accessed through user interface methods."::: + +The previous diagram shows how other technologies of the Microsoft analytical ecosystem can be combined with the capability of Azure Synapse logical data warehouse architecture. For example, data can be ingested into Azure Data Lake Storage (ADLS) and curated using Azure Data Factory to create trusted data products that represent Microsoft [lake database](../../database-designer/concepts-lake-database.md) logical data entities. This trusted, commonly understood data can then be consumed and reused in different analytical environments such as Azure Synapse, Azure Synapse Spark Pool Notebooks, or Azure Cosmos DB. All insights produced in these environments are accessible via a logical data warehouse data virtualization layer made possible by PolyBase. + +> [!TIP] +> A logical data warehouse architecture simplifies business user access to data and adds new value to what you already know in your data warehouse. + +## Conclusions + +> [!TIP] +> Migrating your data warehouse to Azure Synapse lets you make use of a rich Microsoft analytical ecosystem running on Azure. + +Once you migrate your data warehouse to Azure Synapse, you can leverage other technologies in the Microsoft analytical ecosystem. You can not only modernize your data warehouse, but also combine insights produced in other Azure analytical data stores into an integrated analytical architecture. + +Broaden your ETL processing to ingest data of any type into Azure Data Lake Storage.
Prepare and integrate it at scale using Azure Data Factory to produce trusted, commonly understood data assets that can be consumed by your data warehouse and accessed by data scientists and other applications. Build real-time and batch-oriented analytical pipelines and create machine learning models to run in batch, in-real-time on streaming data and on-demand as a service. + +Leverage PolyBase and `COPY INTO` to go beyond your data warehouse. Simplify access to insights from multiple underlying analytical platforms on Azure by creating holistic integrated views in a logical data warehouse. Easily access streaming, big data, and traditional data warehouse insights from BI tools and applications to drive new value in your business. + +## Next steps + +To learn more about migrating to a dedicated SQL pool, see [Migrate a data warehouse to a dedicated SQL pool in Azure Synapse Analytics](../migrate-to-synapse-analytics-guide.md). \ No newline at end of file diff --git a/articles/synapse-analytics/migration-guides/teradata/1-design-performance-migration.md b/articles/synapse-analytics/migration-guides/teradata/1-design-performance-migration.md new file mode 100644 index 000000000000..69411724c4eb --- /dev/null +++ b/articles/synapse-analytics/migration-guides/teradata/1-design-performance-migration.md @@ -0,0 +1,336 @@ +--- +title: "Design and performance for Teradata migrations" +description: Learn how Teradata and Azure Synapse SQL databases differ in their approach to high query performance on exceptionally large data volumes. +ms.service: synapse-analytics +ms.subservice: sql-dw +ms.custom: +ms.devlang: +ms.topic: conceptual +author: ajagadish-24 +ms.author: ajagadish +ms.reviewer: wiassaf +ms.date: 05/24/2022 +--- + +# Design and performance for Teradata migrations + +This article is part one of a seven part series that provides guidance on how to migrate from Teradata to Azure Synapse Analytics. This article provides best practices for design and performance. + +## Overview + +> [!TIP] +> More than just a database—the Azure environment includes a comprehensive set of capabilities and tools. + +Many existing users of Teradata data warehouse systems want to take advantage of the innovations provided by newer environments such as cloud, IaaS, or PaaS, and to delegate tasks like infrastructure maintenance and platform development to the cloud provider. + +Although Teradata and Azure Synapse are both SQL databases designed to use massively parallel processing (MPP) techniques to achieve high query performance on exceptionally large data volumes, there are some basic differences in approach: + +- Legacy Teradata systems are often installed on-premises and use proprietary hardware, while Azure Synapse is cloud based and uses Azure storage and compute resources. + +- Since storage and compute resources are separate in the Azure environment, these resources can be scaled upwards and downwards independently, leveraging the elastic scaling capability. + +- Azure Synapse can be paused or resized as required to reduce resource utilization and cost. + +- Upgrading a Teradata configuration is a major task involving additional physical hardware and potentially lengthy database reconfiguration or reload. + +Microsoft Azure is a globally available, highly secure, scalable cloud environment, that includes Azure Synapse and an ecosystem of supporting tools and capabilities. The next diagram summarizes the Azure Synapse ecosystem. 
+ +:::image type="content" source="../media/1-design-performance-migration/azure-synapse-ecosystem.png" border="true" alt-text="Chart showing the Azure Synapse ecosystem of supporting tools and capabilities."::: + +> [!TIP] +> Azure Synapse gives best-of-breed performance and price-performance in independent benchmarks. + +Azure Synapse provides best-of-breed relational database performance by using techniques such as massively parallel processing (MPP) and multiple levels of automated caching for frequently used data. See the results of this approach in independent benchmarks such as the one run recently by [GigaOm](https://research.gigaom.com/report/data-warehouse-cloud-benchmark/), which compares Azure Synapse to other popular cloud data warehouse offerings. Customers who have migrated to this environment have seen many benefits, including: + +- Improved performance and price/performance. + +- Increased agility and shorter time to value. + +- Faster server deployment and application development. + +- Elastic scalability—only pay for actual usage. + +- Improved security/compliance. + +- Reduced storage and disaster recovery costs. + +- Lower overall TCO and better cost control (OPEX). + +To maximize these benefits, migrate new or existing data and applications to the Azure Synapse platform. In many organizations, this will include migrating an existing data warehouse from legacy on-premises platforms such as Teradata. At a high level, the basic process includes these steps: + +:::image type="content" source="../media/1-design-performance-migration/migration-steps.png" border="true" alt-text="Diagram showing the steps for preparing to migrate, migration, and post-migration."::: + +This article looks at schema migration with a goal of equivalent or better performance of your migrated Teradata data warehouse and data marts on Azure Synapse. It applies specifically to migrations from an existing Teradata environment. + +## Design considerations + +### Migration scope + +> [!TIP] +> Create an inventory of objects to be migrated and document the migration process. + +#### Preparation for migration + +When migrating from a Teradata environment, there are some specific topics to consider in addition to the more general subjects described in this article. + +#### Choose the workload for the initial migration + +Legacy Teradata environments have typically evolved over time to encompass multiple subject areas and mixed workloads. When deciding where to start on an initial migration project, choose an area that can: + +- Prove the viability of migrating to Azure Synapse by quickly delivering the benefits of the new environment. + +- Allow the in-house technical staff to gain relevant experience with the processes and tools involved, which can then be used in migrations to other areas. + +- Create a template for further migrations specific to the source Teradata environment and the current tools and processes that are already in place. + +A good candidate for an initial migration from the Teradata environment that would enable the items above is typically one that implements a BI/Analytics workload (rather than an OLTP workload) with a data model that can be migrated with minimal modifications—normally a star or snowflake schema. + +The migration data volume for the initial exercise should be large enough to demonstrate the capabilities and benefits of the Azure Synapse environment, while keeping the time to value short—typically in the 1-10 TB range.
+ +To minimize the risk and reduce implementation time for the initial migration project, confine the scope of the migration to just the data marts, such as the OLAP DB part of a Teradata warehouse. However, this won't address the broader topics such as ETL migration and historical data migration. Address these topics in later phases of the project, once the migrated data mart layer is backfilled with the data and processes required to build them. + +#### Lift and shift as-is versus a phased approach incorporating changes + +> [!TIP] +> 'Lift and shift' is a good starting point, even if subsequent phases will implement changes to the data model. + +Whatever the drivers and scope of the intended migration, there are—broadly speaking—two types of migration: + +##### Lift and shift + +In this case, the existing data model—such as a star schema—is migrated unchanged to the new Azure Synapse platform. The emphasis is on minimizing risk and the migration time required by reducing the work needed to realize the benefits of moving to the Azure cloud environment. + +This is a good fit for existing Teradata environments where a single data mart is being migrated, or where the data is already in a well-designed star or snowflake schema—or there are other pressures to move to a more modern cloud environment. + +##### Phased approach incorporating modifications + +In cases where a legacy warehouse has evolved over a long time, you may need to re-engineer to maintain the required performance levels or to support new data like IoT streams. Migrate to Azure Synapse to get the benefits of a scalable cloud environment as part of the re-engineering process. Migration could include a change in the underlying data model, such as a move from an Inmon model to a data vault. + +Microsoft recommends moving the existing data model as-is to Azure (optionally using a VM Teradata instance in Azure) and using the performance and flexibility of the Azure environment to apply the re-engineering changes, leveraging Azure's capabilities to make the changes without impacting the existing source system. + +#### Use an Azure VM Teradata instance as part of a migration + +> [!TIP] +> Use Azure VMs to create a temporary Teradata instance to speed up migration and minimize impact on the source system. + +When migrating from an on-premises Teradata environment, you can leverage the Azure environment. Azure provides cheap cloud storage and elastic scalability, so you can create a Teradata instance within a VM in Azure, collocated with the target Azure Synapse environment. + +With this approach, standard Teradata utilities such as Teradata Parallel Data Transporter can efficiently move the subset of Teradata tables being migrated onto the VM instance. Then, all migration tasks can take place within the Azure environment.
This approach has several benefits: + +- After the initial replication of data, the source system isn't impacted by the migration tasks + +- The familiar Teradata interfaces, tools, and utilities are available within the Azure environment + +- Once in the Azure environment, there are no potential issues with network bandwidth availability between the on-premises source system and the cloud target system + +- Tools like Azure Data Factory can efficiently call utilities like Teradata Parallel Transporter to migrate data quickly and easily + +- The migration process is orchestrated and controlled entirely within the Azure environment, keeping everything in a single place + +#### Use Azure Data Factory to implement a metadata-driven migration + +Automate and orchestrate the migration process by making use of the capabilities in the Azure environment. This approach minimizes the impact on the existing Teradata environment, which may already be running close to full capacity. + +Data Factory is a cloud-based data integration service that allows creation of data-driven workflows in the cloud for orchestrating and automating data movement and data transformation. Using Data Factory, you can create and schedule data-driven workflows—called pipelines—to ingest data from disparate data stores. It can process and transform data by using compute services such as Azure HDInsight Hadoop, Spark, Azure Data Lake Analytics, and Azure Machine Learning. + +By creating metadata to list the data tables to be migrated and their location, you can use the Data Factory facilities to manage the migration process. + +### Design differences between Teradata and Azure Synapse + +#### Multiple databases versus a single database and schemas + +> [!TIP] +> Combine multiple databases into a single database in Azure Synapse and use schemas to logically separate the tables. + +In a Teradata environment, there are often multiple separate databases for individual parts of the overall environment. For example, there may be a separate database for data ingestion and staging tables, a database for the core warehouse tables, and another database for data marts, sometimes called a semantic layer. Processing these as ETL/ELT pipelines may implement cross-database joins and will move data between these separate databases. + +Querying within the Azure Synapse environment is limited to a single database. Schemas are used to separate the tables into logically separate groups. Therefore, we recommend using a series of schemas within the target Azure Synapse to mimic any separate databases migrated from the Teradata environment. If the Teradata environment already uses schemas, you may need to use a new naming convention to move the existing Teradata tables and views to the new environment—for example, concatenate the existing Teradata schema and table names into the new Azure Synapse table name and use schema names in the new environment to maintain the original separate database names. Schema consolidation naming can have dots—however, Azure Synapse Spark may have issues. You can use SQL views over the underlying tables to maintain the logical structures, but there are some potential downsides to this approach: + +- Views in Azure Synapse are read-only, so any updates to the data must take place on the underlying base tables. + +- There may already be one or more layers of views in existence, and adding an extra layer of views might impact performance and supportability as nested views are difficult to troubleshoot. 
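To make the schema-based consolidation concrete, the following sketch shows one way two hypothetical Teradata databases, a staging database and a core warehouse database, could map onto schemas in a single Azure Synapse database. All names are examples only, not a prescribed convention.

```sql
-- One schema per original Teradata database (hypothetical names)
CREATE SCHEMA stg;
GO
CREATE SCHEMA prod;
GO

-- A table that lived in the Teradata core database keeps its table name,
-- but now sits in the prod schema of the single Azure Synapse database.
CREATE TABLE prod.customer
(
    customer_id   INT            NOT NULL,
    customer_name NVARCHAR(100)  NOT NULL
)
WITH
(
    DISTRIBUTION = HASH (customer_id),
    CLUSTERED COLUMNSTORE INDEX
);
GO

-- Optional view layer that preserves the original logical structure for existing queries
CREATE VIEW prod.v_customer
AS
SELECT customer_id, customer_name
FROM prod.customer;
GO
```

Because such views are read-only, any data maintenance still has to be directed at the underlying base tables, as noted above.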
+ +#### Table considerations + +> [!TIP] +> Use existing indexes to indicate candidates for indexing in the migrated warehouse. + +When migrating tables between different technologies, only the raw data and the metadata that describes it gets physically moved between the two environments. Other database elements from the source system—such as indexes—aren't migrated, as these may not be needed or may be implemented differently within the new target environment. + +However, it's important to understand where performance optimizations such as indexes have been used in the source environment, as this can indicate where to add performance optimization in the new target environment. For example, if a NUSI (Non-unique secondary index) has been created within the source Teradata environment, it may indicate that a non-clustered index should be created within the migrated Azure Synapse. Other native performance optimization techniques, such as table replication, may be more applicable than a straight 'like for like' index creation. + +#### High availability for the database + +Teradata supports data replication across nodes via the FALLBACK option, where table rows that reside physically on a given node are replicated to another node within the system. This approach guarantees that data won't be lost if there's a node failure and provides the basis for failover scenarios. + +The goal of the high availability architecture in Azure SQL Database is to guarantee that your database is up and running 99.9% of time, without worrying about the impact of maintenance operations and outages. Azure automatically handles critical servicing tasks such as patching, backups, and Windows and SQL upgrades, as well as unplanned events such as underlying hardware, software, or network failures. + +Data storage in Azure Synapse is automatically [backed up](../../sql-data-warehouse/backup-and-restore.md) with snapshots. These snapshots are a built-in feature of the service that creates restore points. You don't have to enable this capability. Users can't currently delete automatic restore points where the service uses these restore points to maintain SLAs for recovery. + +Azure Synapse Dedicated SQL pool takes snapshots of the data warehouse throughout the day creating restore points that are available for seven days. This retention period can't be changed. SQL Data Warehouse supports an eight-hour recovery point objective (RPO). You can restore your data warehouse in the primary region from any one of the snapshots taken in the past seven days. If you require more granular backups, other user-defined options are available. + +#### Unsupported Teradata table types + +> [!TIP] +> Standard tables in Azure Synapse can support migrated Teradata time series and temporal data. + +Teradata supports special table types for time series and temporal data. The syntax and some of the functions for these table types aren't directly supported in Azure Synapse, but the data can be migrated into a standard table with appropriate data types and indexing or partitioning on the date/time column. + +Teradata implements the temporal query functionality via query rewriting to add additional filters within a temporal query to limit the applicable date range. If this functionality is currently used in the source Teradata environment and is to be migrated, add this additional filtering into the relevant temporal queries. 
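As an illustration of the preceding point, a hypothetical Teradata temporal table could be recreated as a standard table with explicit period columns, and the date qualification that Teradata added implicitly becomes an explicit filter in each query. Names, boundary values, and the partitioning scheme below are examples only.

```sql
-- Standard table replacing a Teradata temporal table (hypothetical names and values)
CREATE TABLE dbo.customer_history
(
    customer_id   INT            NOT NULL,
    customer_name NVARCHAR(100)  NOT NULL,
    valid_from    DATE           NOT NULL,
    valid_to      DATE           NOT NULL
)
WITH
(
    DISTRIBUTION = HASH (customer_id),
    CLUSTERED COLUMNSTORE INDEX,
    PARTITION (valid_from RANGE RIGHT FOR VALUES ('2021-01-01', '2022-01-01'))
);

-- The period filter that Teradata temporal SQL applied implicitly is now written out
SELECT customer_id, customer_name
FROM dbo.customer_history
WHERE '2021-06-30' BETWEEN valid_from AND valid_to;
```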
+ +The Azure environment also includes specific features for complex analytics on time-series data at a scale called [time series insights](https://azure.microsoft.com/services/time-series-insights/). This is aimed at IoT data analysis applications and may be more appropriate for this use case. + +#### SQL DML syntax differences + +There are a few differences in SQL Data Manipulation Language (DML) syntax between Teradata SQL and Azure Synapse (T-SQL) that you should be aware of during migration: + +- `QUALIFY`—Teradata supports the `QUALIFY` operator. For example: + + ```sql + SELECT col1 + FROM tab1 + WHERE col1='XYZ' + QUALIFY ROW_NUMBER () OVER (PARTITION by + col1 ORDER BY col1) = 1; + ``` + + The equivalent Azure Synapse syntax is: + + ```sql + SELECT * FROM ( + SELECT col1, ROW_NUMBER () OVER (PARTITION by col1 ORDER BY col1) rn + FROM tab1 WHERE col1='XYZ' + ) WHERE rn = 1; + ``` + +- Date Arithmetic—Azure Synapse has operators such as `DATEADD` and `DATEDIFF` which can be used on `DATE` or `DATETIME` fields. Teradata supports direct subtraction on dates such as 'SELECT DATE1-DATE2 FROM...' + +- In Group by ordinal, explicitly provide the T-SQL column name. + +- Teradata supports LIKE ANY syntax such as: + + ```sql + SELECT * FROM CUSTOMER + WHERE POSTCODE LIKE ANY + ('CV1%', 'CV2%', 'CV3%'); + ``` + + The equivalent in Azure Synapse syntax is: + + ```sql + SELECT * FROM CUSTOMER + WHERE + (POSTCODE LIKE 'CV1%') OR (POSTCODE LIKE 'CV2%') OR (POSTCODE LIKE 'CV3%'); + ``` + +- Depending on system settings, character comparisons in Teradata may be case insensitive by default. In Azure Synapse, character comparisons are always case sensitive. + +#### Functions, stored procedures, triggers, and sequences + +> [!TIP] +> Assess the number and type of non-data objects to be migrated as part of the preparation phase. + +When migrating from a mature legacy data warehouse environment such as Teradata, you must often migrate elements other than simple tables and views to the new target environment. Examples include functions, stored procedures, triggers, and sequences. + +As part of the preparation phase, create an inventory of these objects to be migrated, and define the method of handling them. Assign an appropriate allocation of resources in the project plan. + +There may be facilities in the Azure environment that replace the functionality implemented as functions or stored procedures in the Teradata environment. In this case, it's more efficient to use the built-in Azure facilities rather than recoding the Teradata functions. + +[Data integration partners](../../partner/data-integration.md) offer tools and services that can automate the migration. + +##### Functions + +As with most database products, Teradata supports system functions and user-defined functions within an SQL implementation. When migrating to another database platform such as Azure Synapse, common system functions are available and can be migrated without change. Some system functions may have slightly different syntax, but the required changes can be automated if so. + +For system functions where there's no equivalent, or for arbitrary user-defined functions, recode these using the language(s) available in the target environment. Azure Synapse uses the popular Transact-SQL language to implement user-defined functions. + +##### Stored procedures + +Most modern database products allow for procedures to be stored within the database. Teradata provides the SPL language for this purpose. 
+ +A stored procedure typically contains SQL statements and some procedural logic, and may return data or a status. + +Azure Synapse Analytics, which evolved from Azure SQL Data Warehouse, also supports stored procedures using T-SQL. If you must migrate stored procedures, recode these procedures for their new environment. + +##### Triggers + +Azure Synapse doesn't support trigger creation, but equivalent functionality can be implemented with Azure Data Factory. + +##### Sequences + +With Azure Synapse, sequences are handled in a similar way to Teradata. Use [IDENTITY](/sql/t-sql/statements/create-table-transact-sql-identity-property?msclkid=8ab663accfd311ec87a587f5923eaa7b) columns or SQL code to create the next sequence number in a series. + +### Extract metadata and data from a Teradata environment + +#### Data Definition Language (DDL) generation + +> [!TIP] +> Use existing Teradata metadata to automate the generation of CREATE TABLE and CREATE VIEW DDL for Azure Synapse Analytics. + +You can edit existing Teradata CREATE TABLE and CREATE VIEW scripts to create the equivalent definitions with modified data types, if necessary, as described in the previous section. Typically, this involves removing extra Teradata-specific clauses such as FALLBACK. + +However, all the information that specifies the current definitions of tables and views within the existing Teradata environment is maintained within system catalog tables. These tables are the best source of this information, as it's guaranteed to be up to date and complete. User-maintained documentation may not be in sync with the current table definitions. + +Access the information in these tables via views into the catalog such as `DBC.ColumnsV`, and generate the equivalent CREATE TABLE DDL statements for the corresponding tables in Azure Synapse. + +Third-party migration and ETL tools also use the catalog information to achieve the same result. + +#### Data extraction from Teradata + +> [!TIP] +> Use Teradata Parallel Transporter for the most efficient data extraction. + +Migrate the raw data from existing Teradata tables using standard Teradata utilities, such as BTEQ and FASTEXPORT. During a migration exercise, extract the data as efficiently as possible. Use Teradata Parallel Transporter, which uses multiple parallel FASTEXPORT streams to achieve the best throughput. + +Call Teradata Parallel Transporter directly from Azure Data Factory. This is the recommended approach for managing the data migration process whether the Teradata instance is on-premises or copied to a VM in the Azure environment, as described in the previous section. + +Recommended data formats for the extracted data include delimited text files (also called Comma Separated Values or CSV), Optimized Row Columnar (ORC), or Parquet files. + +For more detailed information on the process of migrating data and ETL from a Teradata environment, see [Data migration, ETL, and load for Teradata migration](2-etl-load-migration-considerations.md). + +## Performance recommendations for Teradata migrations + +This article provides general information and guidelines about use of performance optimization techniques for Azure Synapse and adds specific recommendations for use when migrating from a Teradata environment. + +### Differences in performance tuning approach + +> [!TIP] +> Prioritize early familiarity with Azure Synapse tuning options in a migration exercise. + +This section highlights lower-level implementation differences between Teradata and Azure Synapse for performance tuning.
+ +#### Data distribution options + +Azure enables the specification of data distribution methods for individual tables. The aim is to reduce the amount of data that must be moved between processing nodes when executing a query. + +For large table-large table joins, hash distribute one or, ideally, both tables on one of the join columns, choosing a column with a wide range of values to help ensure an even distribution. Join processing is then performed locally, because the data rows to be joined are already collocated on the same processing node. + +Another way to achieve local joins for small table-large table joins—typically dimension table to fact table in a star schema model—is to replicate the smaller dimension table across all nodes. This ensures that any value of the join key of the larger table will have a matching dimension row locally available. The overhead of replicating the dimension tables is relatively low, provided the tables aren't very large (see [Design guidance for replicated tables](../../sql-data-warehouse/design-guidance-for-replicated-tables.md))—in which case, the hash distribution approach as described above is more appropriate. For more information, see [Distributed tables design](../../sql-data-warehouse/sql-data-warehouse-tables-distribute.md). + +#### Data indexing + +Azure Synapse provides several indexing options, but these are different from the indexing options implemented in Teradata. More details of the different indexing options are described in [table indexes](/azure/sql-data-warehouse/sql-data-warehouse-tables-index). + +Existing indexes within the source Teradata environment can, however, provide a useful indication of how the data is currently used. They can identify candidates for indexing within the Azure Synapse environment. + +#### Data partitioning + +In an enterprise data warehouse, fact tables can contain many billions of rows. Partitioning optimizes the maintenance and querying of these tables by splitting them into separate parts to reduce the amount of data processed. The `CREATE TABLE` statement defines the partitioning specification for a table. Partitioning should only be done on very large tables where each partition will contain at least 60 million rows. + +Only one field per table can be used for partitioning. That field is frequently a date field since many queries are filtered by date or a date range. It's possible to change the partitioning of a table after initial load by recreating the table with the new partitioning scheme using the `CREATE TABLE AS` (or CTAS) statement. See [table partitions](/azure/sql-data-warehouse/sql-data-warehouse-tables-partition) for a detailed discussion of partitioning in Azure Synapse. + +#### Data table statistics + +Ensure that statistics on data tables are up to date by building a [statistics](../../sql/develop-tables-statistics.md) step into ETL/ELT jobs. + +#### PolyBase for data loading + +PolyBase is the most efficient method for loading large amounts of data into the warehouse since it can leverage parallel loading streams. For more information, see [PolyBase data loading strategy](../../sql/load-data-overview.md). + +#### Use workload management + +Use [workload management](../../sql-data-warehouse/sql-data-warehouse-workload-management.md?context=%2fazure%2fsynapse-analytics%2fcontext%2fcontext) instead of resource classes. ETL should run in its own workload group, configured to have more resources per query (less concurrency but more resources).
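As a sketch of what that configuration might look like, the following statements create a workload group and classifier for a hypothetical ETL identity; the group name, resource percentages, and user name are assumptions for illustration, not recommended values.

```sql
-- Reserve a share of resources for data loading (names and percentages are examples)
CREATE WORKLOAD GROUP wg_dataloads
WITH
(
    MIN_PERCENTAGE_RESOURCE = 30,
    CAP_PERCENTAGE_RESOURCE = 60,
    REQUEST_MIN_RESOURCE_GRANT_PERCENT = 30  -- fewer concurrent requests, more resources each
);

-- Route requests from the ETL service account into that workload group
CREATE WORKLOAD CLASSIFIER wc_etl
WITH
(
    WORKLOAD_GROUP = 'wg_dataloads',
    MEMBERNAME = 'etl_user',
    IMPORTANCE = HIGH
);
```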
For more information, see [What is dedicated SQL pool in Azure Synapse Analytics](../../sql-data-warehouse/sql-data-warehouse-overview-what-is.md). + +## Next steps + +To learn more about ETL and load for Teradata migration, see the next article in this series: [Data migration, ETL, and load for Teradata migration](2-etl-load-migration-considerations.md). \ No newline at end of file diff --git a/articles/synapse-analytics/migration-guides/teradata/2-etl-load-migration-considerations.md b/articles/synapse-analytics/migration-guides/teradata/2-etl-load-migration-considerations.md new file mode 100644 index 000000000000..f8e400bc41ac --- /dev/null +++ b/articles/synapse-analytics/migration-guides/teradata/2-etl-load-migration-considerations.md @@ -0,0 +1,229 @@ +--- +title: "Data migration, ETL, and load for Teradata migrations" +description: Learn how to plan your data migration from Teradata to Azure Synapse to minimize the risk and impact on users. +ms.service: synapse-analytics +ms.subservice: sql-dw +ms.custom: +ms.devlang: +ms.topic: conceptual +author: ajagadish-24 +ms.author: ajagadish +ms.reviewer: wiassaf +ms.date: 05/24/2022 +--- + +# Data migration, ETL, and load for Teradata migrations + +This article is part two of a seven part series that provides guidance on how to migrate from Teradata to Azure Synapse Analytics. This article provides best practices for ETL and load migration. + +## Data migration considerations + +### Initial decisions for data migration from Teradata + +When migrating a Teradata data warehouse, you need to ask some basic data-related questions. For example: + +- Should unused table structures be migrated? + +- What's the best migration approach to minimize risk and user impact? + +- When migrating data marts—stay physical or go virtual? + +The next sections discuss these points within the context of migration from Teradata. + +#### Migrate unused tables? + +> [!TIP] +> In legacy systems, it's not unusual for tables to become redundant over time—these don't need to be migrated in most cases. + +It makes sense to only migrate tables that are in use in the existing system. Tables that aren't active can be archived rather than migrated, so that the data is available if necessary in future. It's best to use system metadata and log files rather than documentation to determine which tables are in use, because documentation can be out of date. + +If enabled, Teradata system catalog tables and logs contain information that can determine when a given table was last accessed—which can in turn be used to decide whether a table is a candidate for migration. + +Here's an example query on `DBC.Tables` that provides the date of last access and last modification: + +```sql +SELECT TableName, CreatorName, CreateTimeStamp, LastAlterName, +LastAlterTimeStamp, AccessCount, LastAccessTimeStamp +FROM DBC.Tables t +WHERE DataBaseName = 'databasename' +``` + +If logging is enabled and the log history is accessible, other information, such as SQL query text, is available in table DBQLogTbl and associated logging tables. For more information, see [Teradata log history](https://docs.teradata.com/reader/wada1XMYPkZVTqPKz2CNaw/PuQUxpyeCx4jvP8XCiEeGA). + +#### What is the best migration approach to minimize risk and impact on users? + +> [!TIP] +> Migrate the existing model as-is initially, even if a change to the data model is planned in the future. 
+ +This question comes up often because companies frequently want to lower the impact of changes on the data warehouse data model to improve agility, and they see an opportunity to do so during a migration by modernizing their data model. This approach carries a higher risk because it could impact the ETL jobs that populate the data warehouse and the dependent data marts fed from it. Because of that risk, it's usually better to redesign on this scale after the data warehouse migration. + +Even if a data model change is an intended part of the overall migration, it's good practice to migrate the existing model as-is to the new environment (Azure Synapse in this case), rather than do any re-engineering on the new platform during migration. This approach has the advantage of minimizing the impact on existing production systems, while also leveraging the performance and elastic scalability of the Azure platform for one-off re-engineering tasks. + +When migrating from Teradata, consider creating a Teradata environment in a VM within Azure as a stepping stone in the migration process. + +#### Use a VM Teradata instance as part of a migration + +One optional approach for migrating from an on-premises Teradata environment is to leverage the Azure environment to create a Teradata instance in a VM within Azure, co-located with the target Azure Synapse environment. This is possible because Azure provides cheap cloud storage and elastic scalability. + +With this approach, standard Teradata utilities, such as Teradata Parallel Data Transporter—or third-party data replication tools, such as Attunity Replicate—can be used to efficiently move the subset of Teradata tables that need to be migrated to the VM instance. Then, all migration tasks can take place within the Azure environment. This approach has several benefits: + +- After the initial replication of data, migration tasks don't impact the source system. + +- The Azure environment has familiar Teradata interfaces, tools, and utilities. + +- Once the data is in the Azure environment, there are no potential issues with network bandwidth availability between the on-premises source system and the cloud target system. + +- Tools like Azure Data Factory can efficiently call utilities like Teradata Parallel Transporter to migrate data quickly and easily. + +- The migration process is orchestrated and controlled entirely within the Azure environment. + +#### Migrate data marts - stay physical or go virtual? + +> [!TIP] +> Virtualizing data marts can save on storage and processing resources. + +In legacy Teradata data warehouse environments, it's common practice to create several data marts that are structured to provide good performance for ad hoc self-service queries and reports for a given department or business function within an organization. As such, a data mart typically consists of a subset of the data warehouse and contains aggregated versions of the data in a form that enables users to easily query that data with fast response times via user-friendly query tools such as Microsoft Power BI, Tableau, or MicroStrategy. This form is typically a dimensional data model. One use of data marts is to expose the data in a usable form, even if the underlying warehouse data model is something different, such as a data vault. + +You can use separate data marts for individual business units within an organization to implement robust data security regimes, by only allowing users to access specific data marts that are relevant to them, and eliminating, obfuscating, or anonymizing sensitive data.
+ +If these data marts are implemented as physical tables, they'll require additional storage resources and additional processing to build and refresh them regularly. Also, the data in the mart will only be as up to date as the last refresh operation, and so may be unsuitable for highly volatile data dashboards. + +> [!TIP] +> The performance and scalability of Azure Synapse enables virtualization without sacrificing performance. + +With the advent of relatively low-cost scalable MPP architectures, such as Azure Synapse, and the inherent performance characteristics of such architectures, it may be that you can provide data mart functionality without having to instantiate the mart as a set of physical tables. This is achieved by effectively virtualizing the data marts via SQL views onto the main data warehouse, or via a virtualization layer using features such as views in Azure or the [visualization products of Microsoft partners](../../partner/data-integration.md). This approach simplifies or eliminates the need for additional storage and aggregation processing and reduces the overall number of database objects to be migrated. + +There's another potential benefit to this approach: by implementing the aggregation and join logic within a virtualization layer, and presenting data to external reporting tools via a virtualized view, the processing required to create these views is pushed down into the data warehouse, which is generally the best place to run joins, aggregations, and other related operations on large data volumes. + +The primary drivers for choosing a virtual data mart implementation over a physical data mart are: + +- More agility—a virtual data mart is easier to change than physical tables and the associated ETL processes. + +- Lower total cost of ownership—a virtualized implementation requires fewer data stores and copies of data. + +- Elimination of ETL jobs to migrate, simplifying the data warehouse architecture in a virtualized environment. + +- Performance—although physical data marts have historically been more performant, virtualization products now implement intelligent caching techniques to mitigate this difference. + +### Data migration from Teradata + +#### Understand your data + +Part of migration planning is understanding in detail the volume of data that needs to be migrated, since that can impact decisions about the migration approach. Use system metadata to determine the physical space taken up by the raw data within the tables to be migrated. In this context, 'raw data' means the amount of space used by the data rows within a table, excluding overheads such as indexes and compression. This is especially true for the largest fact tables since these will typically comprise more than 95% of the data. + +You can get an accurate number for the volume of data to be migrated for a given table by extracting a representative sample of the data—for example, one million rows—to an uncompressed delimited flat ASCII data file. Then, use the size of that file to get an average raw data size per row of that table. Finally, multiply that average size by the total number of rows in the full table to give a raw data size for the table. Use that raw data size in your planning. + +## ETL migration considerations + +### Initial decisions regarding Teradata ETL migration + +> [!TIP] +> Plan the approach to ETL migration ahead of time and leverage Azure facilities where appropriate.
+ +For ETL/ELT processing, legacy Teradata data warehouses may use custom-built scripts using Teradata utilities such as BTEQ and Teradata Parallel Transporter (TPT), or third-party ETL tools such as Informatica or Ab Initio. Sometimes, Teradata data warehouses use a combination of ETL and ELT approaches that's evolved over time. When planning a migration to Azure Synapse, you need to determine the best way to implement the required ETL/ELT processing in the new environment while minimizing the cost and risk involved. To learn more about ETL and ELT processing, see [ELT vs ETL Design approach](../../sql-data-warehouse/design-elt-data-loading.md). + +The following sections discuss migration options and make recommendations for various use cases. This flowchart summarizes one approach: + +:::image type="content" source="../media/2-etl-load-migration-considerations/migration-options-flowchart.png" border="true" alt-text="Flowchart of migration options and recommendations."::: + +The first step is always to build an inventory of ETL/ELT processes that need to be migrated. As with other steps, it's possible that the standard 'built-in' Azure features make it unnecessary to migrate some existing processes. For planning purposes, it's important to understand the scale of the migration to be performed. + +In the preceding flowchart, decision 1 relates to a high-level decision about whether to migrate to a totally Azure-native environment. If you're moving to a totally Azure-native environment, we recommend that you re-engineer the ETL processing using [Pipelines and activities in Azure Data Factory](../../../data-factory/concepts-pipelines-activities.md?msclkid=b6ea2be4cfda11ec929ac33e6e00db98&tabs=data-factory) or [Synapse Pipelines](../../get-started-pipelines.md?msclkid=b6e99db9cfda11ecbaba18ca59d5c95c). If you're not moving to a totally Azure-native environment, then decision 2 is whether an existing third-party ETL tool is already in use. + +In the Teradata environment, some or all ETL processing may be performed by custom scripts using Teradata-specific utilities like BTEQ and TPT. In this case, your approach should be to re-engineer using Data Factory. + +> [!TIP] +> Leverage investment in existing third-party tools to reduce cost and risk. + +If a third-party ETL tool is already in use, and especially if there's a large investment in skills or several existing workflows and schedules use that tool, then decision 3 is whether the tool can efficiently support Azure Synapse as a target environment. Ideally, the tool will include 'native' connectors that can leverage Azure facilities like PolyBase or [COPY INTO](/sql/t-sql/statements/copy-into-transact-sql), for most efficient data loading. There's a way to call an external process, such as PolyBase or `COPY INTO`, and pass in the appropriate parameters. In this case, leverage existing skills and workflows, with Azure Synapse as the new target environment. + +If you decide to retain an existing third-party ETL tool, there may be benefits to running that tool within the Azure environment (rather than on an existing on-premises ETL server) and having Azure Data Factory handle the overall orchestration of the existing workflows. One particular benefit is that less data needs to be downloaded from Azure, processed, and then uploaded back into Azure. So, decision 4 is whether to leave the existing tool running as-is or move it into the Azure environment to achieve cost, performance, and scalability benefits. 
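Whichever tool performs the orchestration, the load step itself typically resolves to a PolyBase or `COPY INTO` operation against files staged in Azure storage. As a reference point, a minimal `COPY INTO` sketch might look like the following; the storage account, container, and table names are hypothetical, and the options shown assume comma-delimited extracts with a header row.

```sql
-- Load staged CSV extracts into a staging table (account, container, and table names are examples)
COPY INTO stg.customer
FROM 'https://mystorageaccount.blob.core.windows.net/landing/customer/'
WITH
(
    FILE_TYPE = 'CSV',
    FIELDTERMINATOR = ',',
    ROWTERMINATOR = '0x0A',
    FIRSTROW = 2,                                 -- skip the header row
    CREDENTIAL = (IDENTITY = 'Managed Identity')
);
```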
+ +### Re-engineer existing Teradata-specific scripts + +If some or all the existing Teradata warehouse ETL/ELT processing is handled by custom scripts that utilize Teradata-specific utilities, such as BTEQ, MLOAD, or TPT, these scripts need to be recoded for the new Azure Synapse environment. Similarly, if ETL processes were implemented using stored procedures in Teradata, then these will also have to be recoded. + +> [!TIP] +> The inventory of ETL tasks to be migrated should include scripts and stored procedures. + +Some elements of the ETL process are easy to migrate—for example, by simple bulk data load into a staging table from an external file. It may even be possible to automate those parts of the process, for example, by using PolyBase instead of fast load or MLOAD. If the exported files are Parquet, you can use a native Parquet reader, which is a faster option than PolyBase. Other parts of the process that contain arbitrary complex SQL and/or stored procedures will take more time to re-engineer. + +One way of testing Teradata SQL for compatibility with Azure Synapse is to capture some representative SQL statements from Teradata logs, then prefix those queries with `EXPLAIN`, and then—assuming a like-for-like migrated data model in Azure Synapse—run those `EXPLAIN` statements in Azure Synapse. Any incompatible SQL will generate an error, and the error information can determine the scale of the recoding task. + +[Microsoft partners](/azure/sql-data-warehouse/sql-data-warehouse-partner-data-integration) offer tools and services to migrate Teradata SQL and stored procedures to Azure Synapse. + +### Use third party ETL tools + +As described in the previous section, in many cases the existing legacy data warehouse system will already be populated and maintained by third-party ETL products. For a list of Microsoft data integration partners for Azure Synapse, see [Data Integration partners](/azure/sql-data-warehouse/sql-data-warehouse-partner-data-integration). + +## Data loading from Teradata + +### Choices available when loading data from Teradata + +> [!TIP] +> Third-party tools can simplify and automate the migration process and therefore reduce risk. + +When migrating data from a Teradata data warehouse, there are some basic questions associated with data loading that need to be resolved. You'll need to decide how the data will be physically moved from the existing on-premises Teradata environment into Azure Synapse in the cloud, and which tools will be used to perform the transfer and load. Consider the following questions, which are discussed in the next sections. + +- Will you extract the data to files, or move it directly via a network connection? + +- Will you orchestrate the process from the source system, or from the Azure target environment? + +- Which tools will you use to automate and manage the process? + +#### Transfer data via files or network connection? + +> [!TIP] +> Understand the data volumes to be migrated and the available network bandwidth since these factors influence the migration approach decision. + +Once the database tables to be migrated have been created in Azure Synapse, you can move the data to populate those tables out of the legacy Teradata system and load it into the new environment. There are two basic approaches: + +- **File Extract**—Extract the data from the Teradata tables to flat files, normally in CSV format, via BTEQ, Fast Export, or Teradata Parallel Transporter (TPT). 
Use TPT whenever possible since it's the most efficient in terms of data throughput. + + This approach requires space to land the extracted data files. The space could be local to the Teradata source database (if sufficient storage is available), or remote in Azure Blob Storage. The best performance is achieved when a file is written locally, since that avoids network overhead. + + To minimize the storage and network transfer requirements, it's good practice to compress the extracted data files using a utility like gzip. + + Once extracted, the flat files can either be moved into Azure Blob Storage (collocated with the target Azure Synapse instance) or loaded directly into Azure Synapse using PolyBase or [COPY INTO](/sql/t-sql/statements/copy-into-transact-sql). The method for physically moving data from local on-premises storage to the Azure cloud environment depends on the amount of data and the available network bandwidth. + + Microsoft provides different options to move large volumes of data, including AZCopy for moving files across the network into Azure Storage, Azure ExpressRoute for moving bulk data over a private network connection, and Azure Data Box where the files are moved to a physical storage device that's then shipped to an Azure data center for loading. For more information, see [data transfer](/azure/architecture/data-guide/scenarios/data-transfer). + +- **Direct extract and load across network**—The target Azure environment sends a data extract request, normally via a SQL command, to the legacy Teradata system to extract the data. The results are sent across the network and loaded directly into Azure Synapse, with no need to 'land' the data into intermediate files. The limiting factor in this scenario is normally the bandwidth of the network connection between the Teradata database and the Azure environment. For very large data volumes this approach may not be practical. + +There's also a hybrid approach that uses both methods. For example, you can use the direct network extract approach for smaller dimension tables and samples of the larger fact tables to quickly provide a test environment in Azure Synapse. For the large volume historical fact tables, you can use the file extract and transfer approach using Azure Data Box. + +#### Orchestrate from Teradata or Azure? + +The recommended approach when moving to Azure Synapse is to orchestrate the data extract and loading from the Azure environment using [Azure Synapse Pipelines](../../get-started-pipelines.md?msclkid=b6e99db9cfda11ecbaba18ca59d5c95c) or [Azure Data Factory](../../../data-factory/introduction.md?msclkid=2ccc66eccfde11ecaa58877e9d228779), as well as associated utilities, such as PolyBase or [COPY INTO](/sql/t-sql/statements/copy-into-transact-sql), for most efficient data loading. This approach leverages the Azure capabilities and provides an easy method to build reusable data loading pipelines. + +Other benefits of this approach include reduced impact on the Teradata system during the data load process since the management and loading process is running in Azure, and the ability to automate the process by using metadata-driven data load pipelines. + +#### Which tools can be used? + +The task of data transformation and movement is the basic function of all ETL products. If one of these products is already in use in the existing Teradata environment, then using the existing ETL tool may simplify data migration data from Teradata to Azure Synapse. 
This approach assumes that the ETL tool supports Azure Synapse as a target environment. For more information on tools that support Azure Synapse, see [Data integration partners](/azure/sql-data-warehouse/sql-data-warehouse-partner-data-integration). + +If you're using an ETL tool, consider running that tool within the Azure environment to benefit from Azure cloud performance, scalability, and cost, and free up resources in the Teradata data center. Another benefit is reduced data movement between the cloud and on-premises environments. + +## Summary + +To summarize, our recommendations for migrating data and associated ETL processes from Teradata to Azure Synapse are: + +- Plan ahead to ensure a successful migration exercise. + +- Build a detailed inventory of data and processes to be migrated as soon as possible. + +- Use system metadata and log files to get an accurate understanding of data and process usage. Don't rely on documentation since it may be out of date. + +- Understand the data volumes to be migrated, and the network bandwidth between the on-premises data center and Azure cloud environments. + +- Consider using a Teradata instance in an Azure VM as a stepping stone to offload migration from the legacy Teradata environment. + +- Leverage standard built-in Azure features to minimize the migration workload. + +- Identify and understand the most efficient tools for data extraction and loading in both Teradata and Azure environments. Use the appropriate tools at each phase in the process. + +- Use Azure facilities such as [Azure Synapse Pipelines](../../get-started-pipelines.md?msclkid=b6e99db9cfda11ecbaba18ca59d5c95c) or [Azure Data Factory](../../../data-factory/introduction.md?msclkid=2ccc66eccfde11ecaa58877e9d228779) to orchestrate and automate the migration process while minimizing impact on the Teradata system. + +## Next steps + +To learn more about security access operations, see the next article in this series: [Security, access, and operations for Teradata migrations](3-security-access-operations.md). \ No newline at end of file diff --git a/articles/synapse-analytics/migration-guides/teradata/3-security-access-operations.md b/articles/synapse-analytics/migration-guides/teradata/3-security-access-operations.md new file mode 100644 index 000000000000..0c892064d487 --- /dev/null +++ b/articles/synapse-analytics/migration-guides/teradata/3-security-access-operations.md @@ -0,0 +1,380 @@ +--- +title: "Security, access, and operations for Teradata migrations" +description: Learn about authentication, users, roles, permissions, monitoring, and auditing, and workload management in Azure Synapse and Teradata. +ms.service: synapse-analytics +ms.subservice: sql-dw +ms.custom: +ms.devlang: +ms.topic: conceptual +author: ajagadish-24 +ms.author: ajagadish +ms.reviewer: wiassaf +ms.date: 05/24/2022 +--- + +# Security, access, and operations for Teradata migrations + +This article is part three of a seven part series that provides guidance on how to migrate from Teradata to Azure Synapse Analytics. This article provides best practices for security access operations. + +## Security considerations + +This article discusses connection methods for existing legacy Teradata environments and how they can be migrated to Azure Synapse with minimal risk and user impact. + +We assume there's a requirement to migrate the existing methods of connection and user, role, and permission structure as is. 
If this isn't the case, then you can use Azure utilities such as Azure portal to create and manage a new security regime. + +For more information on the [Azure Synapse security](../../sql-data-warehouse/sql-data-warehouse-overview-manage-security.md#authorization) options see [Security whitepaper](../../guidance/security-white-paper-introduction.md). + +### Connection and authentication + +#### Teradata Authorization Options + +> [!TIP] +> Authentication in both Teradata and Azure Synapse can be "in database" or through external methods. + +Teradata supports several mechanisms for connection and authorization. Valid mechanism values are: + +- **TD1**—selects Teradata 1 as the authentication mechanism. Username and password are required. + +- **TD2**—selects Teradata 2 as the authentication mechanism. Username and password are required. + +- **TDNEGO**—selects one of the authentication mechanisms automatically based on the policy, without user involvement. + +- **LDAP**—selects Lightweight Directory Access Protocol (LDAP) as the Authentication Mechanism. The application provides the username and password. + +- **KRB5**—selects Kerberos (KRB5) on Windows clients working with Windows servers. To log on using KRB5, the user needs to supply a domain, username, and password. The domain is specified by setting the username to `MyUserName@MyDomain`. + +- **NTLM**—selects NTLM on Windows clients working with Windows servers. The application provides the username and password. + +Kerberos (KRB5), Kerberos Compatibility (KRB5C), NT LAN Manager (NTLM), and NT LAN Manager Compatibility (NTLMC) are for Windows only. + +#### Azure Synapse authorization options + +Azure Synapse supports two basic options for connection and authorization: + +- **SQL authentication**: SQL authentication is via a database connection that includes a database identifier, user ID, and password plus other optional parameters. This is functionally equivalent to Teradata TD1, TD2 and default connections. + +- **Azure Active Directory (Azure AD) authentication**: With Azure Active Directory authentication, you can centrally manage the identities of database users and other Microsoft services in one central location. Central ID management provides a single place to manage SQL Data Warehouse users and simplifies permission management. Azure AD can also support connections to LDAP and Kerberos services—for example, Azure AD can be used to connect to existing LDAP directories if these are to remain in place after migration of the database. + +### Users, roles, and permissions + +#### Overview + +> [!TIP] +> High-level planning is essential for a successful migration project. + +Both Teradata and Azure Synapse implement database access control via a combination of users, roles, and permissions. Both use standard `SQL CREATE USER` and `CREATE ROLE` statements to define users and roles, and `GRANT` and `REVOKE` statements to assign or remove permissions to those users and/or roles. + +> [!TIP] +> Automation of migration processes is recommended to reduce elapsed time and scope for errors. + +Conceptually the two databases are similar, and it might be possible to automate the migration of existing user IDs, roles, and permissions to some degree. Migrate such data by extracting the existing legacy user and role information from the Teradata system catalog tables and generating matching equivalent `CREATE USER` and `CREATE ROLE` statements to be run in Azure Synapse to recreate the same user/role hierarchy. 
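+
+For example, the following is a hedged sketch of how the Teradata catalog can be used to generate candidate DDL for Azure Synapse. The naming pattern, password placeholder, and choice of SQL authentication are assumptions; the generated `CREATE LOGIN` statements run in the `master` database and the `CREATE USER` statements run in the target dedicated SQL pool database:
+
+```sql
+/* Run on Teradata: generate candidate CREATE LOGIN statements (one per user). */
+SELECT 'CREATE LOGIN [' || TRIM(DatabaseName) || '] WITH PASSWORD = ''<initial_password>'';' AS SynapseDDL
+FROM DBC.Databases
+WHERE DBKind = 'U';
+
+/* Run on Teradata: generate candidate CREATE USER statements for the target database. */
+SELECT 'CREATE USER [' || TRIM(DatabaseName) || '] FOR LOGIN [' || TRIM(DatabaseName) || '];' AS SynapseDDL
+FROM DBC.Databases
+WHERE DBKind = 'U';
+
+/* Run on Teradata: generate candidate CREATE ROLE statements. */
+SELECT DISTINCT 'CREATE ROLE [' || TRIM(RoleName) || '];' AS SynapseDDL
+FROM DBC.RoleMembers;
+```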
+ +After data extraction, use Teradata system catalog tables to generate equivalent `GRANT` statements to assign permissions (where an equivalent one exists). The following diagram shows how to use existing metadata to generate the necessary SQL. + +:::image type="content" source="../media/3-security-access-operations/automating-migration-privileges.png" border="true" alt-text="Chart showing how to automate the migration of privileges from an existing system."::: + +#### Users and roles + +> [!TIP] +> Migration of a data warehouse requires more than just tables, views, and SQL statements. + +The information about current users and roles in a Teradata system is found in the system catalog tables `DBC.USERS` (or `DBC.DATABASES`) and `DBC.ROLEMEMBERS`. Query these tables (if the user has `SELECT` access to those tables) to obtain current lists of users and roles defined within the system. The following are examples of queries to do this for individual users: + +```sql +/***SQL to find all users***/ +SELECT +DatabaseName AS UserName +FROM DBC.Databases +WHERE dbkind = 'u'; + +/***SQL to find all roles***/ +SELECT A.ROLENAME, A.GRANTEE, A.GRANTOR, + A.DefaultRole, + A.WithAdmin, + B.DATABASENAME, + B.TABLENAME, + B.COLUMNNAME, + B.GRANTORNAME, + B.AccessRight +FROM DBC.ROLEMEMBERS A +JOIN DBC.ALLROLERIGHTS B +ON A.ROLENAME = B.ROLENAME +GROUP BY 1,2,3,4,5,6,7 +ORDER BY 2,1,6; +``` + +These examples modify `SELECT` statements to produce a result set, which is a series of `CREATE USER` and `CREATE ROLE` statements, by including the appropriate text as a literal within the `SELECT` statement. + +There's no way to retrieve existing passwords, so you need to implement a scheme for allocating new initial passwords on Azure Synapse. + +#### Permissions + +> [!TIP] +> There are equivalent Azure Synapse permissions for basic database operations such as DML and DDL. + +In a Teradata system, the system tables `DBC.ALLRIGHTS` and `DBC.ALLROLERIGHTS` hold the access rights for users and roles. Query these tables (if the user has `SELECT` access to those tables) to obtain current lists of access rights defined within the system. The following are examples of queries for individual users: + +```sql +/**SQL for AccessRights held by a USER***/ +SELECT UserName, DatabaseName,TableName,ColumnName, +CASE WHEN Abbv.AccessRight IS NOT NULL THEN Abbv.Description ELSE +ALRTS.AccessRight +END AS AccessRight, GrantAuthority, GrantorName, AllnessFlag, CreatorName, CreateTimeStamp +FROM DBC.ALLRIGHTS ALRTS LEFT OUTER JOIN AccessRightsAbbv Abbv +ON ALRTS.AccessRight = Abbv.AccessRight +WHERE UserName='UserXYZ' +Order By 2,3,4,5; + +/**SQL for AccessRights held by a ROLE***/ +SELECT RoleName, DatabaseName,TableName,ColumnName, +CASE WHEN Abbv.AccessRight IS NOT NULL THEN Abbv.Description ELSE +ALRTS.AccessRight +END AS AccessRight, GrantorName, CreateTimeStamp +FROM DBC.ALLROLERIGHTS ALRTS LEFT OUTER JOIN AccessRightsAbbv +Abbv +ON ALRTS.AccessRight = Abbv.AccessRight +WHERE RoleName='BI_DEVELOPER' +Order By 2,3,4,5; +``` + +Modify these example `SELECT` statements to produce a result set which is a series of `GRANT` statements by including the appropriate text as a literal within the `SELECT` statement. + +Use the table `AccessRightsAbbv` to look up the full text of the access right, as the join key is an abbreviated 'type' field. See the following table for a list of Teradata access rights and their equivalent in Azure Synapse. 
+ +| Teradata permission name | Teradata type | Azure Synapse equivalent | +|------------------------------|---------------|-----------------| +| **ABORT SESSION** | AS | KILL DATABASE CONNECTION | +| **ALTER EXTERNAL PROCEDURE** | AE | \*\*\*\* | +| **ALTER FUNCTION** | AF | ALTER FUNCTION | +| **ALTER PROCEDURE** | AP | ALTER PROCEDURE | +| **CHECKPOINT** | CP | CHECKPOINT | +| **CREATE AUTHORIZATION** | CA | CREATE LOGIN | +| **CREATE DATABASE** | CD | CREATE DATABASE | +| **CREATE EXTERNAL** **PROCEDURE** | CE | \*\*\*\* | +| **CREATE FUNCTION** | CF | CREATE FUNCTION | +| **CREATE GLOP** | GC | \*\*\* | +| **CREATE MACRO** | CM | CREATE PROCEDURE \*\* | +| **CREATE OWNER PROCEDURE** | OP | CREATE PROCEDURE | +| **CREATE PROCEDURE** | PC | CREATE PROCEDURE | +| **CREATE PROFILE** | CO | CREATE LOGIN \* | +| **CREATE ROLE** | CR | CREATE ROLE | +| **DROP DATABASE** | DD | DROP DATABASE| +| **DROP FUNCTION** | DF | DROP FUNCTION | +| **DROP GLOP** | GD | \*\*\* | +| **DROP MACRO** | DM | DROP PROCEDURE \*\* | +| **DROP PROCEDURE** | PD | DELETE PROCEDURE | +| **DROP PROFILE** | DO | DROP LOGIN \* | +| **DROP ROLE** | DR | DELETE ROLE | +| **DROP TABLE** | DT | DROP TABLE | +| **DROP TRIGGER** | DG | \*\*\* | +| **DROP USER** | DU | DROP USER | +| **DROP VIEW** | DV | DROP VIEW | +| **DUMP** | DP | \*\*\*\* | +| **EXECUTE** | E | EXECUTE | +| **EXECUTE FUNCTION** | EF | EXECUTE | +| **EXECUTE PROCEDURE** | PE | EXECUTE | +| **GLOP MEMBER** | GM | \*\*\* | +| **INDEX** | IX | CREATE INDEX | +| **INSERT** | I | INSERT | +| **MONRESOURCE** | MR | \*\*\*\*\* | +| **MONSESSION** | MS | \*\*\*\*\* | +| **OVERRIDE DUMP CONSTRAINT** | OA | \*\*\*\* | +| **OVERRIDE RESTORE CONSTRAINT** | OR | \*\*\*\* | +| **REFERENCES** | RF | REFERENCES | +| **REPLCONTROL** | RO | \*\*\*\*\* | +| **RESTORE** | RS | \*\*\*\* | +| **SELECT** | R | SELECT | +| **SETRESRATE** | SR | \*\*\*\*\* | +| **SETSESSRATE** | SS | \*\*\*\*\* | +| **SHOW** | SH | \*\*\* | +| **UPDATE** | U | UPDATE | + +Comments on the `AccessRightsAbbv` table: + +\* Teradata `PROFILE` is functionally equivalent to `LOGIN` in Azure Synapse + +\*\* In Teradata there are macros and stored procedures. The following table summarizes the differences between them: + + | MACRO | Stored procedure | + |-|-| + | Contains SQL | Contains SQL | + | May contain BTEQ dot commands | Contains comprehensive SPL | + | May receive parameter values passed to it | May receive parameter values passed to it | + | May retrieve one or more rows | Must use a cursor to retrieve more than one row | + | Stored in DBC PERM space | Stored in DATABASE or USER PERM | + | Returns rows to the client | May return one or more values to client as parameters | + +In Azure Synapse, procedures can be used to provide this functionality. + +\*\*\* `SHOW`, `GLOP`, and `TRIGGER` have no direct equivalent in Azure Synapse. + +\*\*\*\* These features are managed automatically by the system in Azure Synapse—see [Operational considerations](#operational-considerations). + +\*\*\*\*\* In Azure Synapse, these features are handled outside of the database. + +Refer to [Azure Synapse Analytics security permissions](../../guidance/security-white-paper-introduction.md). + +## Operational considerations + +> [!TIP] +> Operational tasks are necessary to keep any data warehouse operating efficiently. + +This section discusses how to implement typical Teradata operational tasks in Azure Synapse with minimal risk and impact to users. 
+
+As with all data warehouse products, once in production there are ongoing management tasks that are necessary to keep the system running efficiently and to provide data for monitoring and auditing. Resource utilization and capacity planning for future growth also fall into this category, as does backup/restore of data.
+
+While conceptually the management and operations tasks for different data warehouses are similar, the individual implementations may differ. In general, modern cloud-based products such as Azure Synapse tend to incorporate a more automated and "system managed" approach (as opposed to a more manual approach in legacy data warehouses such as Teradata).
+
+The following sections compare Teradata and Azure Synapse options for various operational tasks.
+
+### Housekeeping tasks
+
+> [!TIP]
+> Housekeeping tasks keep a production warehouse operating efficiently and optimize use of resources such as storage.
+
+In most legacy data warehouse environments, there's a requirement to perform regular 'housekeeping' tasks such as reclaiming disk storage space that can be freed up by removing old versions of updated or deleted rows, or reorganizing data log files or index blocks for efficiency. Collecting statistics is also a potentially time-consuming task. Collecting statistics is required after a bulk data ingest to provide the query optimizer with up-to-date data on which to base query execution plans.
+
+Teradata recommends collecting statistics as follows:
+
+- Collect statistics on unpopulated tables to set up the interval histogram used in internal processing. This initial collection makes subsequent statistics collections faster. Make sure to recollect statistics after data is added.
+
+- Prototype phase: collect statistics on newly populated tables.
+
+- Production phase: recollect statistics after a significant percentage of change to the table or partition (~10% of rows). For high volumes of nonunique values, such as dates or timestamps, it may be advantageous to recollect at 7%.
+
+- Recommendation: collect production-phase statistics after you've created users and applied real-world query loads to the database (up to about three months of querying).
+
+- Collect statistics in the first few weeks after an upgrade or migration, during periods of low CPU utilization.
+
+Statistics collection can be managed manually using Automated Statistics Management open APIs or automatically using the Teradata Viewpoint Stats Manager portlet.
+
+> [!TIP]
+> Automate and monitor housekeeping tasks in Azure.
+
+Teradata Database contains many log tables in the Data Dictionary that accumulate data, either automatically or after certain features are enabled. Because log data grows over time, purge older information to avoid using up permanent space. Options are available to automate the maintenance of these logs. The Teradata dictionary tables that require maintenance are discussed next.
+
+#### Dictionary tables to maintain
+
+Reset accumulators and peak values using the `DBC.AMPUsage` view and the `ClearPeakDisk` macro provided with the software:
+
+- `DBC.Acctg`: resource usage by account/user
+
+- `DBC.DataBaseSpace`: database and table space accounting
+
+Teradata automatically maintains these tables, but good practices can reduce their size:
+
+- `DBC.AccessRights`: user rights on objects
+
+- `DBC.RoleGrants`: role rights on objects
+
+- `DBC.Roles`: defined roles
+
+- `DBC.Accounts`: account codes by user
+
+Archive these logging tables (if desired) and purge information 60-90 days old.
Retention depends on customer requirements: + +- `DBC.SW_Event_Log`: database console log + +- `DBC.ResUsage`: resource monitoring tables + +- `DBC.EventLog`: session logon/logoff history + +- `DBC.AccLogTbl`: logged user/object events + +- `DBC.DBQL tables`: logged user/SQL activity + +- `.NETSecPolicyLogTbl`: logs dynamic security policy audit trails + +- `.NETSecPolicyLogRuleTbl`: controls when and how dynamic security policy is logged + +Purge these tables when the associated removable media is expired and overwritten: + +- `DBC.RCEvent`: archive/recovery events + +- `DBC.RCConfiguration`: archive/recovery config + +- `DBC.RCMedia`: VolSerial for Archive/recovery + +Azure Synapse has an option to automatically create statistics so that they can be used as needed. Perform defragmentation of indexes and data blocks manually, on a scheduled basis, or automatically. Leveraging native built-in Azure capabilities can reduce the effort required in a migration exercise. + +### Monitoring and auditing + +> [!TIP] +> Over time, several different tools have been implemented to allow monitoring and logging of Teradata systems. + +Teradata provides several tools to monitor the operation including Teradata Viewpoint and Ecosystem Manager. For logging query history, the Database Query Log (DBQL) is a Teradata Database feature that provides a series of predefined tables that can store historical records of queries and their duration, performance, and target activity based on user-defined rules. + +Database administrators can use Teradata Viewpoint to determine system status, trends, and individual query status. By observing trends in system usage, system administrators are better able to plan project implementations, batch jobs, and maintenance to avoid peak periods of use. Business users can use Teradata Viewpoint to quickly access the status of reports and queries and drill down into details. + +> [!TIP] +> Azure portal provides a UI to manage monitoring and auditing tasks for all Azure data and processes. + +Similarly, Azure Synapse provides a rich monitoring experience within the Azure portal to provide insights into your data warehouse workload. The Azure portal is the recommended tool when monitoring your data warehouse as it provides configurable retention periods, alerts, recommendations, and customizable charts and dashboards for metrics and logs. + +The portal also enables integration with other Azure monitoring services such as Operations Management Suite (OMS) and [Azure Monitor](../../monitoring/how-to-monitor-using-azure-monitor.md?msclkid=d5e9e46ecfe111ec8ba8ee5360e77c4c) (logs) to provide a holistic monitoring experience for not only the data warehouse but also the entire Azure analytics platform for an integrated monitoring experience. + +> [!TIP] +> Low-level and system-wide metrics are automatically logged in Azure Synapse. + +Resource utilization statistics for the Azure Synapse are automatically logged within the system. The metrics include usage statistics for CPU, memory, cache, I/O and temporary workspace for each query as well as connectivity information (such as failed connection attempts). + +Azure Synapse provides a set of [Dynamic Management Views](../../sql-data-warehouse/sql-data-warehouse-manage-monitor.md?msclkid=3e6eefbccfe211ec82d019ada29b1834) (DMVs). These views are useful when actively troubleshooting and identifying performance bottlenecks with your workload. 
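+
+For example, a simple DMV query along the following lines surfaces the longest-running recent requests in a dedicated SQL pool; the filter and `TOP` clause are just a starting point to adjust for your own troubleshooting:
+
+```sql
+-- List recent requests by elapsed time to spot slow or blocked workload items.
+SELECT TOP 10
+       request_id,
+       [status],
+       submit_time,
+       total_elapsed_time,    -- elapsed time in milliseconds
+       resource_class,
+       command
+FROM sys.dm_pdw_exec_requests
+WHERE submit_time > DATEADD(hour, -4, GETDATE())
+ORDER BY total_elapsed_time DESC;
+```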
+ +For more information, see [Azure Synapse operations and management options](/azure/sql-data-warehouse/sql-data-warehouse-how-to-manage-and-monitor-workload-importance). + +### High Availability (HA) and Disaster Recovery (DR) + +Teradata implements features such as Fallback, Archive Restore Copy utility (ARC), and Data Stream Architecture (DSA) to provide protection against data loss and high availability (HA) via replication and archive of data. Disaster Recovery options include Dual-Active systems, DR as a service, or a replacement system depending on the recovery time requirement. + +> [!TIP] +> Azure Synapse creates snapshots automatically to ensure fast recovery times. + +Azure Synapse uses database snapshots to provide high availability of the warehouse. A data warehouse snapshot creates a restore point that can be used to recover or copy a data warehouse to a previous state. Since Azure Synapse is a distributed system, a data warehouse snapshot consists of many files that are in Azure storage. Snapshots capture incremental changes from the data stored in your data warehouse. + +Azure Synapse automatically takes snapshots throughout the day creating restore points that are available for seven days. This retention period can't be changed. Azure Synapse supports an eight-hour recovery point objective (RPO). A data warehouse can be restored in the primary region from any one of the snapshots taken in the past seven days. + +> [!TIP] +> Use user-defined snapshots to define a recovery point before key updates. + +User-defined restore points are also supported, allowing manual triggering of snapshots to create restore points of a data warehouse before and after large modifications. This capability ensures that restore points are logically consistent, which provides additional data protection in case of any workload interruptions or user errors for a desired RPO of less than 8 hours. + +> [!TIP] +> Microsoft Azure provides automatic backups to a separate geographical location to enable DR. + +As well as the snapshots described previously, Azure Synapse also performs as standard a geo-backup once per day to a [paired data center](/azure/best-practices-availability-paired-regions). The RPO for a geo-restore is 24 hours. You can restore the geo-backup to a server in any other region where Azure Synapse is supported. A geo-backup ensures that a data warehouse can be restored in case the restore points in the primary region aren't available. + +### Workload management + +> [!TIP] +> In a production data warehouse, there are typically mixed workloads which have different resource usage characteristics running concurrently. + +A workload is a class of database requests with common traits whose access to the database can be managed with a set of rules. Workloads are useful for: + +- Setting different access priorities for different types of requests. + +- Monitoring resource usage patterns, performance tuning, and capacity planning. + +- Limiting the number of requests or sessions that can run at the same time. + +In a Teradata system, workload management is the act of managing workload performance by monitoring system activity and acting when pre-defined limits are reached. Workload management uses rules, and each rule applies only to some database requests. However, the collection of all rules applies to all active work on the platform. Teradata Active System Management (TASM) performs full workload management in a Teradata Database. 
+ +In Azure Synapse, resource classes are pre-determined resource limits that govern compute resources and concurrency for query execution. Resource classes can help you manage your workload by setting limits on the number of queries that run concurrently and on the compute resources assigned to each query. There's a trade-off between memory and concurrency. + +See [Resource classes for workload management](/azure/sql-data-warehouse/resource-classes-for-workload-management) for detailed information. + +This information can also be used for capacity planning, determining the resources required for additional users or application workload. This also applies to planning scale up/scale downs of compute resources for cost-effective support of 'peaky' workloads. + +### Scale compute resources + +> [!TIP] +> A major benefit of Azure is the ability to independently scale up and down compute resources on demand to handle peaky workloads cost-effectively. + +The architecture of Azure Synapse separates storage and compute, allowing each to scale independently. As a result, [compute resources can be scaled](../../sql-data-warehouse/quickstart-scale-compute-portal.md) to meet performance demands independent of data storage. You can also pause and resume compute resources. A natural benefit of this architecture is that billing for compute and storage is separate. If a data warehouse isn't in use, you can save on compute costs by pausing compute. + +Compute resources can be scaled up or scaled back by adjusting the data warehouse units setting for the data warehouse. Loading and query performance will increase linearly as you add more data warehouse units. + +Adding more compute nodes adds more compute power and ability to leverage more parallel processing. As the number of compute nodes increases, the number of distributions per compute node decreases, providing more compute power and parallel processing for queries. Similarly, decreasing data warehouse units reduces the number of compute nodes, which reduces the compute resources for queries. + +## Next steps + +To learn more about visualization and reporting, see the next article in this series: [Visualization and reporting for Teradata migrations](4-visualization-reporting.md). \ No newline at end of file diff --git a/articles/synapse-analytics/migration-guides/teradata/4-visualization-reporting.md b/articles/synapse-analytics/migration-guides/teradata/4-visualization-reporting.md new file mode 100644 index 000000000000..868218f4a67b --- /dev/null +++ b/articles/synapse-analytics/migration-guides/teradata/4-visualization-reporting.md @@ -0,0 +1,316 @@ +--- +title: "Visualization and reporting for Teradata migrations" +description: Learn about Microsoft and third-party BI tools for reports and visualizations in Azure Synapse compared to Teradata. +ms.service: synapse-analytics +ms.subservice: sql-dw +ms.custom: +ms.devlang: +ms.topic: conceptual +author: ajagadish-24 +ms.author: ajagadish +ms.reviewer: wiassaf +ms.date: 05/24/2022 +--- + +# Visualization and reporting for Teradata migrations + +This article is part four of a seven part series that provides guidance on how to migrate from Teradata to Azure Synapse Analytics. This article provides best practices for visualization and reporting. + +## Access Azure Synapse Analytics using Microsoft and third-party BI tools + +Almost every organization accesses data warehouses and data marts by using a range of BI tools and applications, such as: + +- Microsoft BI tools, like Power BI. 
+ +- Office applications, like Microsoft Excel spreadsheets. + +- Third-party BI tools from various vendors. + +- Custom analytic applications that have embedded BI tool functionality inside the application. + +- Operational applications that request BI on demand by invoking queries and reports as-a-service on a BI platform, that in-turn queries data in the data warehouse or data marts that are being migrated. + +- Interactive data science development tools, for instance, Azure Synapse Spark Notebooks, Azure Machine Learning, RStudio, Jupyter notebooks. + +The migration of visualization and reporting as part of a data warehouse migration program, means that all the existing queries, reports, and dashboards generated and issued by these tools and applications need to run on Azure Synapse and yield the same results as they did in the original data warehouse prior to migration. + +> [!TIP] +> Existing users, user groups, roles and assignments of access security privileges need to be migrated first for migration of reports and visualizations to succeed. + +To make that happen, everything that BI tools and applications depend on still needs to work once you migrate your data warehouse schema and data to Azure Synapse. That includes the obvious and the not so obvious—such as access and security. While access and security are discussed in [another guide](3-security-access-operations.md) in this series, it's a prerequisite to accessing data in the migrated system. Access and security include ensuring that: + +- Authentication is migrated to let users sign in to the data warehouse and data mart databases on Azure Synapse. + +- All users are migrated to Azure Synapse. + +- All user groups are migrated to Azure Synapse. + +- All roles are migrated to Azure Synapse. + +- All authorization privileges governing access control are migrated to Azure Synapse. + +- User, role, and privilege assignments are migrated to mirror what you had on your existing data warehouse before migration. For example: + - Database object privileges assigned to roles + - Roles assigned to user groups + - Users assigned to user groups and/or roles + +> [!TIP] +> Communication and business user involvement is critical to success. + +In addition, all the required data needs to be migrated to ensure the same results appear in the same reports and dashboards that now query data on Azure Synapse. User expectation will undoubtedly be that migration is seamless and there will be no surprises that destroy their confidence in the migrated system on Azure Synapse. So, this is an area where you must take extreme care and communicate as much as possible to allay any fears in your user base. Their expectations are that: + +- Table structure will be the same if directly referred to in queries + +- Table and column names remain the same if directly referred to in queries; for instance, so that calculated fields defined on columns in BI tools don't fail when aggregate reports are produced + +- Historical analysis remains the same + +- Data types should, if possible, remain the same + +- Query behavior remains the same + +- ODBC / JDBC drivers are tested to make sure nothing has changed in terms of query behavior + +> [!TIP] +> Views and SQL queries using proprietary SQL query extensions are likely to result in incompatibilities that impact BI reports and dashboards. + +If BI tools are querying views in the underlying data warehouse or data mart database, then will these views still work? 
You might think yes, but if those views contain proprietary SQL extensions, specific to your legacy data warehouse DBMS, that have no equivalent in Azure Synapse, you'll need to know about them and find a way to resolve them.
+
+Other issues like the behavior of nulls or data type variations across DBMS platforms need to be tested, in case they cause slightly different calculation results. Obviously, you want to minimize these issues and take all necessary steps to shield business users from any kind of impact. Depending on your legacy data warehouse system (such as Teradata), there are [tools](../../partner/data-integration.md) that can help hide these differences so that BI tools and applications are kept unaware of them and can run unchanged.
+
+> [!TIP]
+> Use repeatable tests to ensure reports, dashboards, and other visualizations migrate successfully.
+
+Testing is critical to visualization and report migration. You need a test suite and agreed-on test data to run and rerun tests in both environments. A test harness is also useful, and a few are mentioned later in this guide. In addition, it's also important to have significant business involvement in this area of migration to keep confidence high and to keep business users engaged and part of the project.
+
+Finally, you may also be thinking about switching BI tools. For example, you might want to [migrate to Power BI](/power-bi/guidance/powerbi-migration-overview). The temptation is to do all of this at the same time, while migrating your schema, data, ETL processing, and more. However, to minimize risk, it's better to migrate to Azure Synapse first and get everything working before undertaking further modernization.
+
+If your existing BI tools run on premises, ensure that they're able to connect to Azure Synapse through your firewall to run comparisons against both environments. Alternatively, if the vendor of your existing BI tools offers its product on Azure, you can try it there. The same applies to applications running on premises that embed BI or that call your BI server on demand, requesting a "headless report" with data returned in XML or JSON, for example.
+
+There's a lot to think about here, so let's look at all this in more detail.
+
+> [!TIP]
+> A lift-and-shift data warehouse migration is likely to minimize any disruption to reports, dashboards, and other visualizations.
+
+## Minimize the impact of data warehouse migration on BI tools and reports using data virtualization
+
+> [!TIP]
+> Data virtualization allows you to shield business users from structural changes during migration so that they remain unaware of changes.
+
+The temptation during data warehouse migration to the cloud is to take the opportunity to make changes during the migration to fulfill long-term requirements, such as open business requests, missing data, new features, and more. However, if you do that, it can affect BI tool business users and applications accessing your data warehouse, especially if it involves structural changes in your data model. Even if there are no new data structures because of new requirements, adopting a different data modeling technique (like Data Vault) in your migrated data warehouse is likely to cause structural changes that impact BI reports and dashboards. If you want to adopt an agile data modeling technique, do so after migration.
One way to minimize the impact of things like schema changes on BI tools, users, and the reports they produce is to introduce data virtualization between BI tools and your data warehouse and data marts. The following diagram shows how data virtualization can hide the migration from users.
+
+:::image type="content" source="../media/4-visualization-reporting/migration-data-virtualization.png" border="true" alt-text="Diagram showing how to hide the migration from users through data virtualization.":::
+
+This breaks the dependency between business users utilizing self-service BI tools and the physical schema of the underlying data warehouse and data marts that are being migrated.
+
+> [!TIP]
+> Schema alterations to tune your data model for Azure Synapse can be hidden from users.
+
+By introducing data virtualization, any schema alterations made during data warehouse and data mart migration to Azure Synapse (to optimize performance, for example) can be hidden from business users because they only access virtual tables in the data virtualization layer. If structural changes are needed, only the mappings between the data warehouse or data marts and the virtual tables would need to change, so that users remain unaware of those changes and unaware of the migration. [Microsoft partners](../../partner/data-integration.md) provide useful data virtualization software.
+
+## Identify high priority reports to migrate first
+
+A key question when migrating your existing reports and dashboards to Azure Synapse is which ones to migrate first. Several factors can drive the decision. For example:
+
+- Business value
+
+- Usage
+
+- Ease of migration
+
+- Data migration strategy
+
+These factors are discussed in more detail later in this article.
+
+Whatever the decision is, it must involve the business, since business users produce the reports and dashboards and consume the insights these artifacts provide in support of business decisions. That said, if most reports and dashboards can be migrated seamlessly, with minimal effort, and offer like-for-like results simply by pointing your BI tool(s) at Azure Synapse instead of your legacy data warehouse system, then everyone benefits. Therefore, if it's that straightforward and there's no reliance on legacy system proprietary SQL extensions, the ease-of-migration approach described above breeds confidence.
+
+### Migrate reports based on usage
+
+Usage is interesting, since it's an indicator of business value. Reports and dashboards that are never used clearly aren't contributing to supporting any decisions and don't currently offer any value. So, do you have any mechanism for finding out which reports and dashboards are currently not used? Several BI tools provide statistics on usage, which would be an obvious place to start.
+
+If your legacy data warehouse has been up and running for many years, there's a high chance you could have hundreds, if not thousands, of reports in existence. In these situations, usage is an important indicator of the business value of a specific report or dashboard. In that sense, it's worth compiling an inventory of the reports and dashboards you have and defining their business purpose and usage statistics.
+
+For those that aren't used at all, it's an appropriate time to seek a business decision to determine whether it's necessary to decommission those reports to optimize your migration efforts.
A key question worth asking when deciding to decommission unused reports is: are they unused because people don't know they exist, because they offer no business value, or because they've been superseded by others?
+
+### Migrate reports based on business value
+
+Usage on its own isn't a clear indicator of business value. There needs to be a deeper business context to determine the value to the business. In an ideal world, we would like to know the contribution of the insights produced in a report to the bottom line of the business. That's exceedingly difficult to determine, since every decision made, and its dependency on the insights in a specific report, would need to be recorded along with the contribution that each decision makes to the bottom line of the business. You would also need to do this over time.
+
+This level of detail is unlikely to be available in most organizations. One way to assess business value and drive migration order is to look at alignment with business strategy. A business strategy set by your executive typically lays out strategic business objectives, key performance indicators (KPIs), and KPI targets that need to be achieved and who is accountable for achieving them. In that sense, classifying your reports and dashboards by strategic business objectives—for example, reduce fraud, improve customer engagement, and optimize business operations—will help you understand business purpose and show which objectives specific reports and dashboards contribute to. Reports and dashboards associated with high-priority objectives in the business strategy can then be highlighted so that migration is focused on delivering business value in a strategic, high-priority area.
+
+It's also worthwhile to classify reports and dashboards as operational, tactical, or strategic, to understand the level in the business where they're used. Contribution to strategic business objectives is required at all these levels. Knowing which reports and dashboards are used, at what level, and what objectives they're associated with, helps to focus migration on high-priority business value that will drive the company forward. To understand this, you need to capture the business contribution of reports and dashboards, perhaps as shown in the following **Business strategy objective** table.
+
+| **Level** | **Report / dashboard name** | **Business purpose** | **Department used** | **Usage frequency** | **Business priority** |
+|-|-|-|-|-|-|
+| **Strategic** | | | | | |
+| **Tactical** | | | | | |
+| **Operational** | | | | | |
+
+While this may seem too time-consuming, you need a mechanism to understand the contribution of reports and dashboards to business value, whether you're migrating or not. Catalogs like Azure Data Catalog are becoming very important because they give you the ability to catalog reports and dashboards, automatically capture the metadata associated with them, and let business users tag and rate them to help you understand business value.
+
+### Migrate reports based on data migration strategy
+
+> [!TIP]
+> Data migration strategy could also dictate which reports and visualizations get migrated first.
+
+If your migration strategy is based on migrating "data marts first", clearly, the order of data mart migration will have a bearing on which reports and dashboards can be migrated first to run on Azure Synapse. Again, this is likely to be a business-value-related decision.
Prioritizing which data marts are migrated first reflects business priorities. Metadata discovery tools can help you here by showing you which reports rely on data in which data mart tables.
+
+## Migration incompatibility issues that can impact reports and visualizations
+
+When it comes to migrating to Azure Synapse, there are several things that can impact the ease of migration for reports, dashboards, and other visualizations. The ease of migration is affected by:
+
+- Incompatibilities that occur during schema migration between your legacy data warehouse and Azure Synapse.
+
+- Incompatibilities in SQL between your legacy data warehouse and Azure Synapse.
+
+### The impact of schema incompatibilities
+
+> [!TIP]
+> Schema incompatibilities include legacy warehouse DBMS table types and data types that are unsupported on Azure Synapse.
+
+BI tool reports and dashboards, and other visualizations, are produced by issuing SQL queries that access physical tables and/or views in your data warehouse or data mart. When it comes to migrating your data warehouse or data mart schema to Azure Synapse, there may be incompatibilities that can impact reports and dashboards, such as:
+
+- Non-standard table types supported in your legacy data warehouse DBMS that don't have an equivalent in Azure Synapse (like Teradata time-series tables).
+
+- Data types supported in your legacy data warehouse DBMS that don't have an equivalent in Azure Synapse. For example, Teradata Geospatial or Interval data types.
+
+In many cases where there are incompatibilities, there may be ways around them. For example, the data in unsupported table types can be migrated into a standard table with appropriate data types and indexed or partitioned on a date/time column. Similarly, it may be possible to represent unsupported data types in another type of column and perform calculations in Azure Synapse to achieve the same result. Either way, refactoring is needed.
+
+> [!TIP]
+> Querying the system catalog of your legacy warehouse DBMS is a quick and straightforward way to identify schema incompatibilities with Azure Synapse.
+
+To identify reports and visualizations impacted by schema incompatibilities, run queries against the system catalog of your legacy data warehouse to identify tables with unsupported data types, as illustrated by the example queries later in this section. Then use metadata from your BI tool or tools to identify reports that access these structures, to see what could be impacted. Obviously, this will depend on the legacy data warehouse DBMS you're migrating from. Find details of how to identify these incompatibilities in [Design and performance for Teradata migrations](1-design-performance-migration.md).
+
+The impact may be less than you think, because many BI tools don't support such data types. As a result, views may already exist in your legacy data warehouse that `CAST` unsupported data types to more generic types.
+
+### The impact of SQL incompatibilities and differences
+
+Additionally, any report, dashboard, or other visualization in an application or tool that makes use of proprietary SQL extensions associated with your legacy data warehouse DBMS is likely to be impacted when migrating to Azure Synapse. This could happen because the BI tool or application:
+
+- Accesses legacy data warehouse DBMS views that include proprietary SQL functions that have no equivalent in Azure Synapse.
+
+- Issues SQL queries that include proprietary SQL functions, peculiar to the SQL dialect of your legacy data warehouse DBMS, that have no equivalent in Azure Synapse.
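+
+As a concrete illustration of the catalog-driven checks described earlier in this section, the following sketches query the Teradata data dictionary. The database name is hypothetical, and the data type codes shown are assumptions to verify against your Teradata release:
+
+```sql
+/* Columns whose data types may have no direct Azure Synapse equivalent
+   (codes for PERIOD, INTERVAL, and LOB types shown as examples). */
+SELECT DatabaseName, TableName, ColumnName, ColumnType
+FROM DBC.ColumnsV
+WHERE DatabaseName = 'SALES_MART'
+  AND ColumnType IN ('PD', 'PT', 'PS', 'PM', 'PZ', 'YR', 'MO', 'DY', 'BO', 'CO');
+
+/* View definitions that can be scanned for proprietary SQL such as QUALIFY or SAMPLE. */
+SELECT DatabaseName, TableName AS ViewName, RequestText AS ViewDefinition
+FROM DBC.TablesV
+WHERE TableKind = 'V'
+  AND DatabaseName = 'SALES_MART';
+```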
+ +### Gauge the impact of SQL incompatibilities on your reporting portfolio + +You can't rely on documentation associated with reports, dashboards, and other visualizations to gauge how big of an impact SQL incompatibility may have on the portfolio of embedded query services, reports, dashboards, and other visualizations you're intending to migrate to Azure Synapse. There must be a more precise way of doing that. + +#### Use EXPLAIN statements to find SQL incompatibilities + +> [!TIP] +> Gauge the impact of SQL incompatibilities by harvesting your DBMS log files and running `EXPLAIN` statements. + +One way is to get a hold of the SQL log files of your legacy data warehouse. Use a script to pull out a representative set of SQL statements into a file, prefix each SQL statement with an `EXPLAIN` statement, and then run all the `EXPLAIN` statements in Azure Synapse. Any SQL statements containing proprietary SQL extensions from your legacy data warehouse that are unsupported will be rejected by Azure Synapse when the `EXPLAIN` statements are executed. This approach would at least give you an idea of how significant or otherwise the use of incompatible SQL is. + +Metadata from your legacy data warehouse DBMS will also help you when it comes to views. Again, you can capture and view SQL statements, and `EXPLAIN` them as described previously to identify incompatible SQL in views. + +## Test report and dashboard migration to Azure Synapse Analytics + +> [!TIP] +> Test performance and tune to minimize compute costs. + +A key element in data warehouse migration is the testing of reports and dashboards against Azure Synapse to verify that the migration has worked. To do this, you need to define a series of tests and a set of required outcomes for each test that needs to be run to verify success. It's important to ensure that reports and dashboards are tested and compared across your existing and migrated data warehouse systems to: + +- Identify whether schema changes made during migration such as data types to be converted, have impacted reports in terms of ability to run, results, and corresponding visualizations. + +- Verify all users are migrated. + +- Verify all roles are migrated and users assigned to those roles. + +- Verify all data access security privileges are migrated to ensure access control list (ACL) migration. + +- Ensure consistent results of all known queries, reports, and dashboards. + +- Ensure that data and ETL migration is complete and error free. + +- Ensure data privacy is upheld. + +- Test performance and scalability. + +- Test analytical functionality. + +For information about how to migrate users, user groups, roles, and privileges, see the [Security, access, and operations for Teradata migrations](3-security-access-operations.md) which is part of this series of articles. + +> [!TIP] +> Build an automated test suite to make tests repeatable. + +It's also best practice to automate testing as much as possible, to make each test repeatable and to allow a consistent approach to evaluating results. This works well for known regular reports, and could be managed via [Synapse pipelines](../../get-started-pipelines.md?msclkid=8f3e7e96cfed11eca432022bc07c18de) or [Azure Data Factory](../../../data-factory/introduction.md?msclkid=2ccc66eccfde11ecaa58877e9d228779) orchestration. If you already have a suite of test queries in place for regression testing, you could use the testing tools to automate the post migration testing. 
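+
+For example, one simple, repeatable check that such an automated pipeline could run against each migrated table is a row count and aggregate comparison. This is a sketch with hypothetical table and column names; capture the equivalent figures in Teradata before migration and compare them with the Azure Synapse results:
+
+```sql
+-- Run in Azure Synapse after migration (run an equivalent COUNT/SUM/MIN/MAX query in Teradata beforehand).
+SELECT 'dbo.fact_sales'                            AS table_name,
+       COUNT_BIG(*)                                AS row_count,
+       SUM(CAST(sales_amount AS DECIMAL(38, 2)))   AS sales_amount_total,
+       MIN(sale_date)                              AS min_sale_date,
+       MAX(sale_date)                              AS max_sale_date
+FROM dbo.fact_sales;
+```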
+
+> [!TIP]
+> Leverage tools that can compare metadata lineage to verify results.
+
+Ad-hoc analysis and reporting are more challenging and require a set of tests to be compiled to verify that results are consistent across your legacy data warehouse DBMS and Azure Synapse. If reports and dashboards are inconsistent, then having the ability to compare metadata lineage across original and migrated systems is extremely valuable during migration testing, as it can highlight differences and pinpoint where they occurred when these aren't easy to detect. This is discussed in more detail later in this article.
+
+In terms of security, the best approach is to create roles, assign access privileges to roles, and then attach users to roles. To access your newly migrated data warehouse, set up an automated process to create new users and assign them to roles. To detach users from roles, you can follow the same steps.
+
+It's also important to communicate the cut-over to all users, so they know what's changing and what to expect.
+
+## Analyze lineage to understand dependencies between reports, dashboards, and data
+
+> [!TIP]
+> Having access to metadata and data lineage from reports all the way back to the data source is critical for verifying that migrated reports are working correctly.
+
+A critical success factor in migrating reports and dashboards is understanding lineage. Lineage is metadata that shows the journey that data has taken, so you can see the path from the report/dashboard all the way back to where the data originates. It shows how data has gone from point to point, its location in the data warehouse and/or data mart, and where it's used—for example, in what reports. It helps you understand what happens to data as it travels through different data stores—files and databases—different ETL pipelines, and into reports. If business users have access to data lineage, it improves trust, breeds confidence, and enables more informed business decisions.
+
+> [!TIP]
+> Tools that automate metadata collection and show end-to-end lineage in a multi-vendor environment are valuable when it comes to migration.
+
+In multi-vendor data warehouse environments, business analysts in BI teams may map out data lineage. For example, if you have Informatica for your ETL, Oracle for your data warehouse, and Tableau for reporting, each of which has its own metadata repository, figuring out where a specific data element in a report came from can be challenging and time-consuming.
+
+To migrate seamlessly from a legacy data warehouse to Azure Synapse, end-to-end data lineage helps prove like-for-like migration when comparing reports and dashboards against your legacy environment. That means that metadata from several tools needs to be captured and integrated to show the end-to-end journey. Having access to tools that support automated metadata discovery and data lineage will let you see duplicate reports and ETL processes, and reports that rely on data sources that are obsolete, questionable, or even non-existent. With this information, you can reduce the number of reports and ETL processes that you migrate.
+
+You can also compare the end-to-end lineage of a report in Azure Synapse against the end-to-end lineage of the same report in your legacy data warehouse environment to see if there are any differences that have occurred inadvertently during migration. This helps enormously with testing and verifying migration success.
+ +Data lineage visualization not only reduces time, effort, and error in the migration process, but also enables faster execution of the migration project. + +By leveraging automated metadata discovery and data lineage tools that can compare lineage, you can verify if a report is produced using data migrated to Azure Synapse and if it's produced in the same way as in your legacy environment. This kind of capability also helps you determine: + +- What data needs to be migrated to ensure successful report and dashboard execution on Azure Synapse + +- What transformations have been and should be performed to ensure successful execution on Azure Synapse + +- How to reduce report duplication + +This substantially simplifies the data migration process, because the business will have a better idea of the data assets it has and what needs to be migrated to enable a solid reporting environment on Azure Synapse. + +> [!TIP] +> Azure Data Factory and several third-party ETL tools support lineage. + +Several ETL tools provide end-to-end lineage capability, and you may be able to make use of this via your existing ETL tool if you're continuing to use it with Azure Synapse. Microsoft [Synapse pipelines](../../get-started-pipelines.md?msclkid=8f3e7e96cfed11eca432022bc07c18de) or [Azure Data Factory](../../../data-factory/introduction.md?msclkid=2ccc66eccfde11ecaa58877e9d228779) lets you view lineage in mapping flows. Also, [Microsoft partners](../../partner/data-integration.md) provide automated metadata discovery, data lineage, and lineage comparison tools. + +## Migrate BI tool semantic layers to Azure Synapse Analytics + +> [!TIP] +> Some BI tools have semantic layers that simplify business user access to physical data structures in your data warehouse or data mart, like SAP Business Objects and IBM Cognos. + +Some BI tools have what is known as a semantic metadata layer. The role of this metadata layer is to simplify business user access to physical data structures in an underlying data warehouse or data mart database. It does this by providing high-level objects like dimensions, measures, hierarchies, calculated metrics, and joins. These objects use business terms familiar to business analysts and are mapped to the physical data structures in the data warehouse or data mart database. + +When it comes to data warehouse migration, changes to column names or table names may be forced upon you. For example, in Oracle, table names can have a "#". In Azure Synapse, the "#" is only allowed as a prefix to a table name to indicate a temporary table. Therefore, you may need to change a table name if migrating from Oracle. You may need to do rework to change mappings in such cases. + +A good way to get everything consistent across multiple BI tools is to create a universal semantic layer, using common data names for high-level objects like dimensions, measures, hierarchies, and joins, in a data virtualization server (as shown in the next diagram) that sits between applications, BI tools, and Azure Synapse. This allows you to set up everything once (instead of in every tool), including calculated fields, joins and mappings, and then point all BI tools at the data virtualization server. + +> [!TIP] +> Use data virtualization to create a common semantic layer to guarantee consistency across all BI tools in an Azure Synapse environment. 
+
+In this way, you get consistency across all BI tools, while at the same time breaking the dependency between BI tools and applications, and the underlying physical data structures in Azure Synapse. Use [Microsoft partners](../../partner/data-integration.md) on Azure to implement this. The following diagram shows how a common vocabulary in the data virtualization server lets multiple BI tools see a common semantic layer.
+
+:::image type="content" source="../media/4-visualization-reporting/data-virtualization-semantics.png" border="true" alt-text="Diagram with common data names and definitions that relate to the data virtualization server.":::
+
+## Conclusions
+
+> [!TIP]
+> Identify incompatibilities early to gauge the extent of the migration effort. Migrate your users, user groups, roles, and privilege assignments. Only migrate the reports and visualizations that are used and are contributing to business value.
+
+In a lift-and-shift data warehouse migration to Azure Synapse, most reports and dashboards should migrate easily.
+
+However, if data structures have changed, if data is stored in unsupported data types, or if access to data in the data warehouse or data mart is via a view that includes proprietary SQL that's unsupported in your Azure Synapse environment, you'll need to deal with those issues as they arise.
+
+You can't rely on documentation to find out where the issues are likely to be. Making use of `EXPLAIN` statements is a pragmatic and quick way to identify incompatibilities in SQL. Rework any incompatible SQL to achieve similar results in Azure Synapse. In addition, it's recommended that you make use of automated metadata discovery and lineage tools to identify duplicate reports and reports that are no longer valid because they use data from data sources that you no longer use, and to understand dependencies. Some of these tools help compare lineage to verify that reports running in your legacy data warehouse environment are produced identically in Azure Synapse.
+
+Don't migrate reports that you no longer use. BI tool usage data can help determine which ones aren't in use. For the visualizations and reports that you do want to migrate, migrate all users, user groups, roles, and privileges. Associate these reports with strategic business objectives and priorities to help you identify which reports contribute insight toward specific objectives. This is useful if you're using business value to drive your report migration strategy. If you're migrating by data store—data mart by data mart—metadata will also help you identify which reports depend on which tables and views, so that you can focus on migrating those first.
+
+Finally, consider data virtualization to shield BI tools and applications from structural changes to the data warehouse and/or the data mart data model that may occur during migration. You can also use a common vocabulary with data virtualization to define a common semantic layer that guarantees consistent common data names, definitions, metrics, hierarchies, joins, and more across all BI tools and applications in a migrated Azure Synapse environment.
+
+## Next steps
+
+To learn more about minimizing SQL issues, see the next article in this series: [Minimizing SQL issues for Teradata migrations](5-minimize-sql-issues.md).
\ No newline at end of file diff --git a/articles/synapse-analytics/migration-guides/teradata/5-minimize-sql-issues.md b/articles/synapse-analytics/migration-guides/teradata/5-minimize-sql-issues.md new file mode 100644 index 000000000000..5390eaceaa4c --- /dev/null +++ b/articles/synapse-analytics/migration-guides/teradata/5-minimize-sql-issues.md @@ -0,0 +1,378 @@ +--- +title: "Minimize SQL issues for Teradata migrations" +description: Learn how to minimize the risk of SQL issues when migrating from Teradata to Azure Synapse. +ms.service: synapse-analytics +ms.subservice: sql-dw +ms.custom: +ms.devlang: +ms.topic: conceptual +author: ajagadish-24 +ms.author: ajagadish +ms.reviewer: wiassaf +ms.date: 05/24/2022 +--- + +# Minimize SQL issues for Teradata migrations + +This article is part five of a seven part series that provides guidance on how to migrate from Teradata to Azure Synapse Analytics. This article provides best practices for minimizing SQL issues. + +## Overview + +### Characteristics of Teradata environments + +> [!TIP] +> Teradata pioneered large scale SQL databases using MPP in the 1980s. + +In 1984, Teradata initially released their database product. It introduced massively parallel processing (MPP) techniques to enable data processing at a scale more efficiently than the existing mainframe technologies available at the time. Since then, the product has evolved and has many installations among large financial institutions, telecommunications, and retail companies. The original implementation used proprietary hardware and was channel attached to mainframes—typically IBM or IBM-compatible processors. + +While more recent announcements have included network connectivity and the availability of Teradata technology stack in the cloud (including Azure), most existing installations are on premises, so many users are considering migrating some or all their Teradata data to Azure Synapse to gain the benefits of a move to a modern cloud environment. + +> [!TIP] +> Many existing Teradata installations are data warehouses using a dimensional data model. + +Teradata technology is often used to implement a data warehouse, supporting complex analytic queries on large data volumes using SQL. Dimensional data models—star or snowflake schemas—are common, as is the implementation of data marts for individual departments. + +This combination of SQL and dimensional data models simplifies migration to Azure Synapse, since the basic concepts and SQL skills are transferable. The recommended approach is to migrate the existing data model as-is to reduce risk and time taken. Even if the eventual intention is to make changes to the data model (for example, moving to a Data Vault model), perform an initial as-is migration and then make changes within the Azure cloud environment, leveraging the performance, elastic scalability, and cost advantages there. + +While the SQL language has been standardized, individual vendors have in some cases implemented proprietary extensions. This document highlights potential SQL differences you may encounter while migrating from a legacy Teradata environment, and to provide workarounds. + +### Use an Azure VM Teradata instance as part of a migration + +> [!TIP] +> Use an Azure VM to create a temporary Teradata instance to speed up migration and minimize impact on the source system. + +Leverage the Azure environment when running a migration from an on-premises Teradata environment. 
Azure provides affordable cloud storage and elastic scalability to create a Teradata instance within a VM in Azure, collocated with the target Azure Synapse environment. + +With this approach, standard Teradata utilities such as Teradata Parallel Data Transporter (or third-party data replication tools such as Attunity Replicate) can be used to efficiently move the subset of Teradata tables that are to be migrated onto the VM instance, and then all migration tasks can take place within the Azure environment. This approach has several benefits: + +- After the initial replication of data, the source system isn't impacted by the migration tasks + +- The familiar Teradata interfaces, tools and utilities are available within the Azure environment + +- Once in the Azure environment there are no potential issues with network bandwidth availability between the on-premises source system and the cloud target system + +- Tools such as Azure Data Factory can efficiently call utilities such as Teradata Parallel Transporter to migrate data quickly and easily + +- The migration process is orchestrated and controlled entirely within the Azure environment + +### Use Azure Data Factory to implement a metadata-driven migration + +> [!TIP] +> Automate the migration process by using Azure Data Factory capabilities. + +Automate and orchestrate the migration process by making use of the capabilities in the Azure environment. This approach also minimizes the migration's impact on the existing Teradata environment, which may already be running close to full capacity. + +Azure Data Factory is a cloud-based data integration service that allows creation of data-driven workflows in the cloud for orchestrating and automating data movement and data transformation. Using Data Factory, you can create and schedule data-driven workflows—called pipelines—that can ingest data from disparate data stores. It can process and transform data by using compute services such as Azure HDInsight Hadoop, Spark, Azure Data Lake Analytics, and Azure Machine Learning. + +By creating metadata to list the data tables to be migrated and their location, you can use the Data Factory facilities to manage and automate parts of the migration process. You can also use [Synapse pipelines](../../get-started-pipelines.md?msclkid=8f3e7e96cfed11eca432022bc07c18de). + +## SQL DDL differences between Teradata and Azure Synapse + +### SQL Data Definition Language (DDL) + +> [!TIP] +> SQL DDL commands `CREATE TABLE` and `CREATE VIEW` have standard core elements but are also used to define implementation-specific options. + +The ANSI SQL standard defines the basic syntax for DDL commands such as `CREATE TABLE` and `CREATE VIEW`. These commands are used within both Teradata and Azure Synapse, but they've also been extended to allow definition of implementation-specific features such as indexing, table distribution and partitioning options. + +The following sections discuss Teradata-specific options to consider during a migration to Azure Synapse. + +### Table considerations + +> [!TIP] +> Use existing indexes to give an indication of candidates for indexing in the migrated warehouse. + +When migrating tables between different technologies, only the raw data and its descriptive metadata gets physically moved between the two environments. Other database elements from the source system, such as indexes and log files, aren't directly migrated as these may not be needed or may be implemented differently within the new target environment. 
For example, there's no equivalent of Teradata's `MULTISET` table option within Azure Synapse's `CREATE TABLE` syntax.
+
+It's important to understand where performance optimizations—such as indexes—were used in the source environment. This indicates where performance optimization can be added in the new target environment. For example, if a NUSI (non-unique secondary index) has been created in the source Teradata environment, this might indicate that a non-clustered index should be created in the migrated Azure Synapse database. Other native performance optimization techniques, such as table replication, may be more applicable than a straight 'like for like' index creation.
+
+### Unsupported Teradata table types
+
+> [!TIP]
+> Standard tables within Azure Synapse can support migrated Teradata time-series and temporal tables.
+
+Teradata includes support for special table types for time-series and temporal data. The syntax and some of the functions for these table types aren't directly supported within Azure Synapse, but the data can be migrated into a standard table with appropriate data types and indexing or partitioning on the date/time column.
+
+Teradata implements temporal query functionality via query rewriting, adding filters to a temporal query to limit the applicable date range. If this functionality is currently in use within the source Teradata environment and is to be migrated, add this additional filtering into the relevant temporal queries.
+
+The Azure environment also includes a feature for complex analytics on time-series data at scale, called [Time Series Insights](https://azure.microsoft.com/services/time-series-insights/). This is aimed at IoT data analysis applications and may be more appropriate for that use case.
+
+### Teradata data type mapping
+
+> [!TIP]
+> Assess the impact of unsupported data types as part of the preparation phase.
+
+Most Teradata data types have a direct equivalent in Azure Synapse. The following table shows these data types together with the recommended approach for handling them. In the table, the Teradata column type is the code that's stored within the system catalog—for example, in `DBC.ColumnsV`.
+
+| Teradata column type | Teradata data type | Azure Synapse data type |
+|----------------------|--------------------|-------------------------|
+| ++ | TD_ANYTYPE | Not supported in Azure Synapse |
+| A1 | ARRAY | Not supported in Azure Synapse |
+| AN | ARRAY | Not supported in Azure Synapse |
+| AT | TIME | TIME |
+| BF | BYTE | BINARY |
+| BO | BLOB | BLOB data type isn't directly supported but can be replaced with BINARY |
+| BV | VARBYTE | BINARY |
+| CF | CHAR | CHAR |
+| CO | CLOB | CLOB data type isn't directly supported but can be replaced with VARCHAR |
+| CV | VARCHAR | VARCHAR |
+| D | DECIMAL | DECIMAL |
+| DA | DATE | DATE |
+| DH | INTERVAL DAY TO HOUR | INTERVAL data types aren't supported in Azure Synapse, but date calculations can be done with the date comparison functions (for example, DATEDIFF and DATEADD) |
+| DM | INTERVAL DAY TO MINUTE | INTERVAL data types aren't supported in Azure Synapse, but date calculations can be done with the date comparison functions (for example, DATEDIFF and DATEADD) |
+| DS | INTERVAL DAY TO SECOND | INTERVAL data types aren't supported in Azure Synapse, but date calculations can be done with the date comparison functions (for example, DATEDIFF and DATEADD) |
+| DT | DATASET | DATASET data type isn't supported in Azure Synapse |
+| DY | INTERVAL DAY | INTERVAL data types aren't supported in Azure Synapse, but date calculations can be done with the date comparison functions (for example, DATEDIFF and DATEADD) |
+| F | FLOAT | FLOAT |
+| HM | INTERVAL HOUR TO MINUTE | INTERVAL data types aren't supported in Azure Synapse, but date calculations can be done with the date comparison functions (for example, DATEDIFF and DATEADD) |
+| HR | INTERVAL HOUR | INTERVAL data types aren't supported in Azure Synapse, but date calculations can be done with the date comparison functions (for example, DATEDIFF and DATEADD) |
+| HS | INTERVAL HOUR TO SECOND | INTERVAL data types aren't supported in Azure Synapse, but date calculations can be done with the date comparison functions (for example, DATEDIFF and DATEADD) |
+| I1 | BYTEINT | TINYINT |
+| I2 | SMALLINT | SMALLINT |
+| I8 | BIGINT | BIGINT |
+| I | INTEGER | INT |
+| JN | JSON | JSON data type isn't currently directly supported within Azure Synapse, but JSON data can be stored in a VARCHAR field |
+| MI | INTERVAL MINUTE | INTERVAL data types aren't supported in Azure Synapse, but date calculations can be done with the date comparison functions (for example, DATEDIFF and DATEADD) |
+| MO | INTERVAL MONTH | INTERVAL data types aren't supported in Azure Synapse, but date calculations can be done with the date comparison functions (for example, DATEDIFF and DATEADD) |
+| MS | INTERVAL MINUTE TO SECOND | INTERVAL data types aren't supported in Azure Synapse, but date calculations can be done with the date comparison functions (for example, DATEDIFF and DATEADD) |
+| N | NUMBER | NUMERIC |
+| PD | PERIOD(DATE) | Can be converted to VARCHAR or split into two separate dates |
+| PM | PERIOD (TIMESTAMP WITH TIME ZONE) | Can be converted to VARCHAR or split into two separate timestamps (DATETIMEOFFSET) |
+| PS | PERIOD(TIMESTAMP) | Can be converted to VARCHAR or split into two separate timestamps (DATETIMEOFFSET) |
+| PT | PERIOD(TIME) | Can be converted to VARCHAR or split into two separate times |
+| PZ | PERIOD (TIME WITH TIME ZONE) | Can be converted to VARCHAR or split into two separate times, but WITH TIME ZONE isn't supported for TIME |
+| SC | INTERVAL SECOND | INTERVAL data types aren't supported in Azure Synapse, but date calculations can be done with the date comparison functions (for example, DATEDIFF and DATEADD) |
+| SZ | TIMESTAMP WITH TIME ZONE | DATETIMEOFFSET |
+| TS | TIMESTAMP | DATETIME or DATETIME2 |
+| TZ | TIME WITH TIME ZONE | TIME WITH TIME ZONE isn't supported because TIME is stored using "wall clock" time only, without a time zone offset |
+| XM | XML | XML data type isn't currently directly supported within Azure Synapse, but XML data can be stored in a VARCHAR field |
+| YM | INTERVAL YEAR TO MONTH | INTERVAL data types aren't supported in Azure Synapse, but date calculations can be done with the date comparison functions (for example, DATEDIFF and DATEADD) |
+| YR | INTERVAL YEAR | INTERVAL data types aren't supported in Azure Synapse, but date calculations can be done with the date comparison functions (for example, DATEDIFF and DATEADD) |
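+
+For example, a `PERIOD(DATE)` column can be split into two standard `DATE` columns at extract time by using Teradata's `BEGIN` and `END` period functions, so the values can be loaded into ordinary date columns in Azure Synapse. The following minimal sketch uses hypothetical table and column names:
+
+```sql
+-- Teradata extract query (hypothetical names): split a PERIOD(DATE) column
+-- into two DATE columns that map directly to DATE columns in Azure Synapse.
+SELECT
+    product_id,
+    price_amount,
+    BEGIN(validity_period) AS valid_from,
+    END(validity_period)   AS valid_to
+FROM ProductDB.ProductPrices;
+```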
+
+Use the metadata from the Teradata catalog tables to determine whether any of these data types are to be migrated, and allow for this in the migration plan. For example, use a SQL query like the following to find any occurrences of unsupported data types that need attention.
+
+```sql
+SELECT
+  ColumnType,
+  CASE
+    WHEN ColumnType = '++' THEN 'TD_ANYTYPE'
+    WHEN ColumnType = 'A1' THEN 'ARRAY'
+    WHEN ColumnType = 'AN' THEN 'ARRAY'
+    WHEN ColumnType = 'BO' THEN 'BLOB'
+    WHEN ColumnType = 'CO' THEN 'CLOB'
+    WHEN ColumnType = 'DH' THEN 'INTERVAL DAY TO HOUR'
+    WHEN ColumnType = 'DM' THEN 'INTERVAL DAY TO MINUTE'
+    WHEN ColumnType = 'DS' THEN 'INTERVAL DAY TO SECOND'
+    WHEN ColumnType = 'DT' THEN 'DATASET'
+    WHEN ColumnType = 'DY' THEN 'INTERVAL DAY'
+    WHEN ColumnType = 'HM' THEN 'INTERVAL HOUR TO MINUTE'
+    WHEN ColumnType = 'HR' THEN 'INTERVAL HOUR'
+    WHEN ColumnType = 'HS' THEN 'INTERVAL HOUR TO SECOND'
+    WHEN ColumnType = 'JN' THEN 'JSON'
+    WHEN ColumnType = 'MI' THEN 'INTERVAL MINUTE'
+    WHEN ColumnType = 'MO' THEN 'INTERVAL MONTH'
+    WHEN ColumnType = 'MS' THEN 'INTERVAL MINUTE TO SECOND'
+    WHEN ColumnType = 'PD' THEN 'PERIOD(DATE)'
+    WHEN ColumnType = 'PM' THEN 'PERIOD (TIMESTAMP WITH TIME ZONE)'
+    WHEN ColumnType = 'PS' THEN 'PERIOD(TIMESTAMP)'
+    WHEN ColumnType = 'PT' THEN 'PERIOD(TIME)'
+    WHEN ColumnType = 'PZ' THEN 'PERIOD (TIME WITH TIME ZONE)'
+    WHEN ColumnType = 'SC' THEN 'INTERVAL SECOND'
+    WHEN ColumnType = 'SZ' THEN 'TIMESTAMP WITH TIME ZONE'
+    WHEN ColumnType = 'XM' THEN 'XML'
+    WHEN ColumnType = 'YM' THEN 'INTERVAL YEAR TO MONTH'
+    WHEN ColumnType = 'YR' THEN 'INTERVAL YEAR'
+  END AS Data_Type,
+  COUNT (*) AS Data_Type_Count
+FROM DBC.ColumnsV
+WHERE DatabaseName IN ('UserDB1', 'UserDB2', 'UserDB3') -- select databases to be migrated
+GROUP BY 1,2
+ORDER BY 1;
+```
+
+> [!TIP]
+> Third-party tools and services can automate data mapping tasks.
+
+There are third-party vendors who offer tools and services to automate migration, including the mapping of data types. If a third-party ETL tool such as Informatica or Talend is already in use in the Teradata environment, those tools can implement any required data transformations.
+
+### Data Definition Language (DDL) generation
+
+> [!TIP]
+> Use existing Teradata metadata to automate the generation of `CREATE TABLE` and `CREATE VIEW` DDL for Azure Synapse.
+
+Edit existing Teradata `CREATE TABLE` and `CREATE VIEW` scripts to create the equivalent definitions, with modified data types as described previously, if necessary. Typically, this involves removing extra Teradata-specific clauses such as `FALLBACK` or `MULTISET`.
+
+However, all the information that specifies the current definitions of tables and views within the existing Teradata environment is maintained within system catalog tables. This is the best source of this information, because it's guaranteed to be up to date and complete. Be aware that user-maintained documentation may not be in sync with the current table definitions.
+
+Access this information via views onto the catalog, such as `DBC.ColumnsV`, and generate the equivalent `CREATE TABLE` DDL statements for the equivalent tables in Azure Synapse.
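+
+As an illustration, the following sketch shows a hypothetical Teradata table definition and one possible Azure Synapse equivalent. The table name, columns, and the choice of hash distribution are assumptions for the example rather than a prescriptive conversion.
+
+```sql
+-- Teradata source definition (hypothetical)
+CREATE MULTISET TABLE SalesDB.SalesFact, FALLBACK
+(
+    SaleId     BIGINT NOT NULL,
+    CustomerId INTEGER NOT NULL,
+    SaleDate   DATE NOT NULL,
+    Amount     DECIMAL(18,2)
+)
+PRIMARY INDEX (CustomerId);
+```
+
+```sql
+-- One possible Azure Synapse equivalent: Teradata-specific clauses such as MULTISET,
+-- FALLBACK, and PRIMARY INDEX are removed, and distribution and index options are added.
+CREATE TABLE dbo.SalesFact
+(
+    SaleId     BIGINT NOT NULL,
+    CustomerId INT NOT NULL,
+    SaleDate   DATE NOT NULL,
+    Amount     DECIMAL(18,2)
+)
+WITH
+(
+    DISTRIBUTION = HASH(CustomerId),
+    CLUSTERED COLUMNSTORE INDEX
+);
+```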
+
+> [!TIP]
+> Third-party tools and services can automate data mapping tasks.
+
+There are [Microsoft partners](../../partner/data-integration.md) who offer tools and services to automate migration, including data-type mapping. Also, if a third-party ETL tool such as Informatica or Talend is already in use in the Teradata environment, that tool can implement any required data transformations.
+
+## SQL DML differences between Teradata and Azure Synapse
+
+### SQL Data Manipulation Language (DML)
+
+> [!TIP]
+> SQL DML commands `SELECT`, `INSERT`, and `UPDATE` have standard core elements but may also implement different syntax options.
+
+The ANSI SQL standard defines the basic syntax for DML commands such as `SELECT`, `INSERT`, `UPDATE`, and `DELETE`. Both Teradata and Azure Synapse use these commands, but in some cases there are implementation differences.
+
+The following sections discuss the Teradata-specific DML commands that you should consider during a migration to Azure Synapse.
+
+### SQL DML syntax differences
+
+Be aware of these differences in SQL Data Manipulation Language (DML) syntax between Teradata SQL and Azure Synapse when migrating:
+
+- `QUALIFY`—Teradata supports the `QUALIFY` operator. For example:
+
+  ```sql
+  SELECT col1
+  FROM tab1
+  WHERE col1='XYZ'
+  QUALIFY ROW_NUMBER() OVER (PARTITION BY col1 ORDER BY col1) = 1;
+  ```
+
+  The equivalent Azure Synapse syntax is:
+
+  ```sql
+  SELECT * FROM (
+    SELECT col1, ROW_NUMBER() OVER (PARTITION BY col1 ORDER BY col1) AS rn
+    FROM tab1 WHERE col1='XYZ'
+  ) AS t
+  WHERE rn = 1;
+  ```
+
+- Date arithmetic—Azure Synapse has functions such as `DATEADD` and `DATEDIFF`, which can be used on `DATE` or `DATETIME` fields. Teradata supports direct subtraction on dates, such as `SELECT DATE1 - DATE2 FROM...`.
+
+- `GROUP BY` ordinal—rather than grouping by column position (ordinal), explicitly provide the column name in T-SQL.
+
+- `LIKE ANY`—Teradata supports `LIKE ANY` syntax such as:
+
+  ```sql
+  SELECT * FROM CUSTOMER
+  WHERE POSTCODE LIKE ANY
+  ('CV1%', 'CV2%', 'CV3%');
+  ```
+
+  The equivalent Azure Synapse syntax is:
+
+  ```sql
+  SELECT * FROM CUSTOMER
+  WHERE
+  (POSTCODE LIKE 'CV1%') OR (POSTCODE LIKE 'CV2%') OR (POSTCODE LIKE 'CV3%');
+  ```
+
+- Depending on system settings, character comparisons in Teradata may be case insensitive by default. In Azure Synapse, the case sensitivity of character comparisons depends on the collation in use.
+
+### Use EXPLAIN to validate legacy SQL
+
+> [!TIP]
+> Use real queries from the existing system query logs to find potential migration issues.
+
+One way of testing legacy Teradata SQL for compatibility with Azure Synapse is to capture some representative SQL statements from the legacy system query logs, prefix those queries with [EXPLAIN](/sql/t-sql/queries/explain-transact-sql?msclkid=91233fc1cff011ec9dff597671b7ae97), and (assuming a 'like for like' migrated data model in Azure Synapse with the same table and column names) run those `EXPLAIN` statements in Azure Synapse. Any incompatible SQL will throw an error—use this information to determine the scale of the recoding task. This approach doesn't require that data is loaded into the Azure environment, only that the relevant tables and views have been created.
+
+### Functions, stored procedures, triggers, and sequences
+
+> [!TIP]
+> As part of the preparation phase, assess the number and type of non-data objects being migrated.
+
+When migrating from a mature legacy data warehouse environment such as Teradata, there are often elements other than simple tables and views that need to be migrated to the new target environment. Examples include functions, stored procedures, triggers, and sequences.
+
+As part of the preparation phase, create an inventory of the objects that need to be migrated and define the methods for handling them. Then assign an appropriate allocation of resources in the project plan.
+
+There may be facilities in the Azure environment that replace the functionality implemented as either functions or stored procedures in the Teradata environment. In this case, it's often more efficient to use the built-in Azure facilities rather than recoding the Teradata functions.
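+
+For example, many common Teradata expressions map directly onto T-SQL built-in functions and can often be recoded with simple substitutions rather than with user-defined functions. The following sketch uses hypothetical table and column names to illustrate a few typical substitutions:
+
+```sql
+-- T-SQL recoding of some common Teradata expressions (hypothetical names)
+SELECT
+    ISNULL(discount, 0)                  AS discount_amount,  -- Teradata: ZEROIFNULL(discount)
+    DATEADD(month, 3, order_date)        AS review_date,      -- Teradata: ADD_MONTHS(order_date, 3)
+    DATEDIFF(day, order_date, ship_date) AS days_to_ship,     -- Teradata: ship_date - order_date
+    SUBSTRING(product_code, 1, 3)        AS product_prefix    -- Teradata: SUBSTR(product_code, 1, 3)
+FROM dbo.Orders;
+```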
+
+> [!TIP]
+> Third-party products and services can automate migration of non-data elements.
+
+[Microsoft partners](../../partner/data-integration.md) offer tools and services that can automate the migration.
+
+See the following sections for more information on each of these elements.
+
+#### Functions
+
+As with most database products, Teradata supports system functions and user-defined functions within the SQL implementation. When migrating to another database platform such as Azure Synapse, common system functions are available and can be migrated without change. Some system functions may have slightly different syntax, but the required changes can be automated. Functions with no equivalent, such as arbitrary user-defined functions, may need to be recoded using the languages available in the target environment. Azure Synapse uses the popular Transact-SQL language to implement user-defined functions.
+
+#### Stored procedures
+
+Most modern database products allow for procedures to be stored within the database. Teradata provides the SPL language for this purpose. A stored procedure typically contains SQL statements and some procedural logic, and may return data or a status.
+
+The dedicated SQL pools of Azure Synapse Analytics also support stored procedures using T-SQL, so if you must migrate stored procedures, recode them accordingly.
+
+#### Triggers
+
+Azure Synapse doesn't support the creation of triggers, but you can implement equivalent functionality within Azure Data Factory.
+
+#### Sequences
+
+Azure Synapse sequences are handled in a similar way to Teradata, using [Identity to create surrogate keys](../../sql-data-warehouse/sql-data-warehouse-tables-identity.md) or [managed identity](../../../data-factory/data-factory-service-identity.md?tabs=data-factory).
+
+#### Teradata to T-SQL mapping
+
+The following table shows the mapping from Teradata data types to T-SQL data types that are compliant with Azure Synapse SQL:
+
+| Teradata Data Type | Azure Synapse SQL Data Type |
+|----------------------------------------|-----------------------------|
+| bigint | bigint |
+| bool | bit |
+| boolean | bit |
+| byteint | tinyint |
+| char \[(*p*)\] | char \[(*p*)\] |
+| char varying \[(*p*)\] | varchar \[(*p*)\] |
+| character \[(*p*)\] | char \[(*p*)\] |
+| character varying \[(*p*)\] | varchar \[(*p*)\] |
+| date | date |
+| datetime | datetime |
+| dec \[(*p*\[,*s*\])\] | decimal \[(*p*\[,*s*\])\] |
+| decimal \[(*p*\[,*s*\])\] | decimal \[(*p*\[,*s*\])\] |
+| double | float(53) |
+| double precision | float(53) |
+| float \[(*p*)\] | float \[(*p*)\] |
+| float4 | float(53) |
+| float8 | float(53) |
+| int | int |
+| int1 | tinyint |
+| int2 | smallint |
+| int4 | int |
+| int8 | bigint |
+| integer | integer |
+| interval | *Not supported* |
+| national char varying \[(*p*)\] | nvarchar \[(*p*)\] |
+| national character \[(*p*)\] | nchar \[(*p*)\] |
+| national character varying \[(*p*)\] | nvarchar \[(*p*)\] |
+| nchar \[(*p*)\] | nchar \[(*p*)\] |
+| numeric \[(*p*\[,*s*\])\] | numeric \[(*p*\[,*s*\])\] |
+| nvarchar \[(*p*)\] | nvarchar \[(*p*)\] |
+| real | real |
+| smallint | smallint |
+| time | time |
+| time with time zone | datetimeoffset |
+| time without time zone | time |
+| timespan | *Not supported* |
+| timestamp | datetime2 |
+| timetz | datetimeoffset |
+| varchar \[(*p*)\] | varchar \[(*p*)\] |
+
+## Summary
+
+Typical legacy Teradata installations are implemented in a way that makes migration to Azure Synapse straightforward.
They use SQL for analytical queries on large data volumes, and the data is usually held in some form of dimensional data model. These factors make them good candidates for migration to Azure Synapse.
+
+To minimize the task of migrating the actual SQL code, follow these recommendations:
+
+- Initial migration of the data warehouse should be as-is to minimize risk and time taken, even if the eventual final environment will incorporate a different data model such as Data Vault.
+
+- Consider using a Teradata instance in an Azure VM as a stepping stone as part of the migration process.
+
+- Understand the differences between the Teradata SQL implementation and Azure Synapse.
+
+- Use metadata and query logs from the existing Teradata implementation to assess the impact of the differences, and plan an approach to mitigate them.
+
+- Automate the process wherever possible to minimize errors, risk, and time for the migration.
+
+- Consider using specialist [Microsoft partners](../../partner/data-integration.md) and services to streamline the migration.
+
+## Next steps
+
+To learn more about Microsoft and third-party tools, see the next article in this series: [Tools for Teradata data warehouse migration to Azure Synapse Analytics](6-microsoft-third-party-migration-tools.md).
\ No newline at end of file
diff --git a/articles/synapse-analytics/migration-guides/teradata/6-microsoft-third-party-migration-tools.md b/articles/synapse-analytics/migration-guides/teradata/6-microsoft-third-party-migration-tools.md
new file mode 100644
index 000000000000..f2e4dbb55dc4
--- /dev/null
+++ b/articles/synapse-analytics/migration-guides/teradata/6-microsoft-third-party-migration-tools.md
@@ -0,0 +1,132 @@
+---
+title: "Tools for Teradata data warehouse migration to Azure Synapse Analytics"
+description: Learn about Microsoft and third-party data and database migration tools that can help you migrate from Teradata to Azure Synapse.
+ms.service: synapse-analytics
+ms.subservice: sql-dw
+ms.custom:
+ms.devlang:
+ms.topic: conceptual
+author: ajagadish-24
+ms.author: ajagadish
+ms.reviewer: wiassaf
+ms.date: 05/24/2022
+---
+
+# Tools for Teradata data warehouse migration to Azure Synapse Analytics
+
+This article is part six of a seven-part series that provides guidance on how to migrate from Teradata to Azure Synapse Analytics. This article provides best practices for Microsoft and third-party tools.
+
+## Data warehouse migration tools
+
+By migrating your existing data warehouse to Azure Synapse, you benefit from:
+
+- A globally secure, scalable, low-cost, cloud-native, pay-as-you-use analytical database.
+
+- The rich Microsoft analytical ecosystem that exists on Azure. This ecosystem consists of technologies to help modernize your data warehouse once it's migrated, and extends your analytical capabilities to drive new value.
+
+Several tools from Microsoft and third-party partner vendors can help you migrate your existing data warehouse to Azure Synapse. These tools include:
+
+- Microsoft data and database migration tools.
+
+- Third-party data warehouse automation tools to automate and document the migration to Azure Synapse.
+
+- Third-party data warehouse migration tools to migrate schema and data to Azure Synapse.
+
+- Third-party tools to minimize the impact of SQL differences between your existing data warehouse DBMS and Azure Synapse.
+
+The following sections discuss these tools in more detail.
+ +## Microsoft data migration tools + +> [!TIP] +> Data Factory includes tools to help migrate your data and your entire data warehouse to Azure. + +Microsoft offers several tools to help you migrate your existing data warehouse to Azure Synapse, such as: + +- Microsoft Azure Data Factory. + +- Microsoft services for physical data transfer. + +- Microsoft services for data ingestion. + +### Microsoft Azure Data Factory + +Microsoft Azure Data Factory is a fully managed, pay-as-you-use, hybrid data integration service for highly scalable ETL and ELT processing. It uses Spark to process and analyze data in parallel and in memory to maximize throughput. + +> [!TIP] +> Data Factory allows you to build scalable data integration pipelines code-free. + +[Azure Data Factory connectors](../../../data-factory/connector-overview.md?msclkid=00086e4acff211ec9263dee5c7eb6e69) connect to external data sources and databases and have templates for common data integration tasks. A visual front-end, browser-based UI enables non-programmers to create and run process pipelines to ingest, transform, and load data. More experienced programmers have the option to incorporate custom code, such as Python programs. + +> [!TIP] +> Data Factory enables collaborative development between business and IT professionals. + +Data Factory is also an orchestration tool. It's the best Microsoft tool to automate the end-to-end migration process to reduce risk and make the migration process easily repeatable. The following diagram shows a Data Factory mapping data flow. + +:::image type="content" source="../media/6-microsoft-3rd-party-migration-tools/azure-data-factory-mapping-dataflows.png" border="true" alt-text="Screenshot showing an example of an Azure Data Factory mapping dataflow." lightbox="../media/6-microsoft-3rd-party-migration-tools/azure-data-factory-mapping-dataflows-lrg.png"::: + +The next screenshot shows a Data Factory wrangling data flow. + +:::image type="content" source="../media/6-microsoft-3rd-party-migration-tools/azure-data-factory-wrangling-dataflows.png" border="true" alt-text="Screenshot showing an example of Azure Data Factory wrangling dataflows."::: + +You can develop simple or comprehensive ETL and ELT processes without coding or maintenance with a few clicks. These processes ingest, move, prepare, transform, and process your data. You can design and manage scheduling and triggers in Azure Data Factory to build an automated data integration and loading environment. In Data Factory, you can define, manage, and schedule PolyBase bulk data load processes. + +> [!TIP] +> Data Factory includes tools to help migrate your data and your entire data warehouse to Azure. + +You can use Data Factory to implement and manage a hybrid environment that includes on-premises, cloud, streaming and SaaS data—for example, from applications like Salesforce—in a secure and consistent way. + +A new capability in Data Factory is wrangling data flows. This opens up Data Factory to business users who want to visually discover, explore, and prepare data at scale without writing code. This capability, similar to Microsoft Excel Power Query or Microsoft Power BI Dataflows, offers self-service data preparation. Business users can prepare and integrate data through a spreadsheet style user interface with drop-down transform options. 
+ +Azure Data Factory is the recommended approach for implementing data integration and ETL/ELT processes for an Azure Synapse environment, especially if existing legacy processes need to be refactored. + +### Microsoft services for physical data transfer + +> [!TIP] +> Microsoft offers a range of products and services to assist with data transfer. + +#### Azure ExpressRoute + +Azure ExpressRoute creates private connections between Azure data centers and infrastructure on your premises or in a collocation environment. ExpressRoute connections don't go over the public Internet, and they offer more reliability, faster speeds, and lower latencies than typical internet connections. In some cases, by using ExpressRoute connections to transfer data between on-premises systems and Azure, you gain significant cost benefits. + +#### AzCopy + +[AzCopy](../../../storage/common/storage-use-azcopy-v10.md) is a command line utility that copies files to Azure Blob Storage via a standard internet connection. In a warehouse migration project, you can use AzCopy to upload extracted, compressed, and delimited text files before loading through PolyBase, or a native Parquet reader if the exported files are Parquet format. AzCopy can upload individual files, file selections, or file directories. + +#### Azure Data Box + +Microsoft offers a service called Azure Data Box. This service writes data to be migrated to a physical storage device. This device is then shipped to an Azure data center and loaded into cloud storage. The service can be cost-effective for large volumes of data—for example, tens or hundreds of terabytes—or where network bandwidth isn't readily available. Azure Data Box is typically used for one-off historical data load when migrating a large amount of data to Azure Synapse. + +Another service is Data Box Gateway, a virtualized cloud storage gateway device that resides on your premises and sends your images, media, and other data to Azure. Use Data Box Gateway for one-off migration tasks or ongoing incremental data uploads. + +### Microsoft services for data ingestion + +#### COPY INTO + +The [COPY](/sql/t-sql/statements/copy-into-transact-sql) statement provides the most flexibility for high-throughput data ingestion into Azure Synapse Analytics. Refer to the list of capabilities that `COPY` offers for data ingestion. + +#### PolyBase + +> [!TIP] +> PolyBase can load data in parallel from Azure Blob Storage into Azure Synapse. + +PolyBase provides the fastest and most scalable method of loading bulk data into Azure Synapse. PolyBase leverages the MPP architecture to use parallel loading, to give the fastest throughput, and can read data from flat files in Azure Blob Storage or directly from external data sources and other relational databases via connectors. + +PolyBase can also directly read from files compressed with gzip—this reduces the physical volume of data moved during the load process. PolyBase supports popular data formats such as delimited text, ORC and Parquet. + +> [!TIP] +> Invoke PolyBase from Azure Data Factory as part of a migration pipeline. + +PolyBase is tightly integrated with Azure Data Factory to enable data load ETL/ELT processes to be rapidly developed and scheduled through a visual GUI, leading to higher productivity and fewer errors than hand-written code. + +PolyBase is the recommended data load method for Azure Synapse, especially for high-volume data. 
PolyBase loads data using the `CREATE TABLE AS` or `INSERT...SELECT` statements—CTAS achieves the highest possible throughput as it minimizes the amount of logging required. Compressed delimited text files are the most efficient input format. For maximum throughput, split very large input files into multiple smaller files and load these in parallel. For fastest loading to a staging table, define the target table as type `HEAP` and use round-robin distribution. + +However, PolyBase has some limitations. Rows to be loaded must be less than 1 MB in length. Fixed-width format or nested data, such as JSON and XML, aren't directly readable. + +## Microsoft partners can help you migrate your data warehouse to Azure Synapse Analytics + +In addition to tools that can help you with various aspects of data warehouse migration, there are several practiced [Microsoft partners](../../partner/data-integration.md) that can bring their expertise to help you move your legacy on-premises data warehouse platform to Azure Synapse. + +## Next steps + +To learn more about implementing modern data warehouses, see the next article in this series: [Beyond Teradata migration, implementing a modern data warehouse in Microsoft Azure](7-beyond-data-warehouse-migration.md). \ No newline at end of file diff --git a/articles/synapse-analytics/migration-guides/teradata/7-beyond-data-warehouse-migration.md b/articles/synapse-analytics/migration-guides/teradata/7-beyond-data-warehouse-migration.md new file mode 100644 index 000000000000..1a2d12f35b71 --- /dev/null +++ b/articles/synapse-analytics/migration-guides/teradata/7-beyond-data-warehouse-migration.md @@ -0,0 +1,375 @@ +--- +title: "Beyond Teradata migration, implementing a modern data warehouse in Microsoft Azure" +description: Learn how a Teradata migration to Azure Synapse lets you integrate your data warehouse with the Microsoft Azure analytical ecosystem. +ms.service: synapse-analytics +ms.subservice: sql-dw +ms.custom: +ms.devlang: +ms.topic: conceptual +author: ajagadish-24 +ms.author: ajagadish +ms.reviewer: wiassaf +ms.date: 05/24/2022 +--- + +# Beyond Teradata migration, implementing a modern data warehouse in Microsoft Azure + +This article is part seven of a seven part series that provides guidance on how to migrate from Teradata to Azure Synapse Analytics. This article provides best practices for implementing modern data warehouses. + +## Beyond data warehouse migration to Azure + +One of the key reasons to migrate your existing data warehouse to Azure Synapse is to utilize a globally secure, scalable, low-cost, cloud-native, pay-as-you-use analytical database. Azure Synapse also lets you integrate your migrated data warehouse with the complete Microsoft Azure analytical ecosystem to take advantage of, and integrate with, other Microsoft technologies that help you modernize your migrated data warehouse. 
This includes integrating with technologies like: + +- Azure Data Lake Storage—for cost effective data ingestion, staging, cleansing and transformation to free up data warehouse capacity occupied by fast growing staging tables + +- Azure Data Factory—for collaborative IT and self-service data integration [with connectors](../../../data-factory/connector-overview.md) to cloud and on-premises data sources and streaming data + +- [The Open Data Model Common Data Initiative](/common-data-model/)—to share consistent trusted data across multiple technologies including: + - Azure Synapse + - Azure Synapse Spark + - Azure HDInsight + - Power BI + - SAP + - Adobe Customer Experience Platform + - Azure IoT + - Microsoft ISV Partners + +- [Microsoft's data science technologies](/azure/architecture/data-science-process/platforms-and-tools) including: + - Azure ML studio + - Azure Machine Learning Service + - Azure Synapse Spark (Spark as a service) + - Jupyter Notebooks + - RStudio + - ML.NET + - Visual Studio .NET for Apache Spark to enable data scientists to use Azure Synapse data to train machine learning models at scale. + +- [Azure HDInsight](../../../hdinsight/index.yml)—to leverage big data analytical processing and join big data with Azure Synapse data by creating a Logical Data Warehouse using PolyBase + +- [Azure Event Hubs](../../../event-hubs/event-hubs-about.md), [Azure Stream Analytics](../../../stream-analytics/stream-analytics-introduction.md) and [Apache Kafka](/azure/databricks/spark/latest/structured-streaming/kafka)—to integrate with live streaming data from within Azure Synapse + +There's often acute demand to integrate with [Machine Learning](../../machine-learning/what-is-machine-learning.md) to enable custom built, trained machine learning models for use in Azure Synapse. This would enable in-database analytics to run at scale in-batch, on an event-driven basis and on-demand. The ability to exploit in-database analytics in Azure Synapse from multiple BI tools and applications also guarantees that all get the same predictions and recommendations. + +In addition, there's an opportunity to integrate Azure Synapse with Microsoft partner tools on Azure to shorten time to value. + +Let's look at these in more detail to understand how you can take advantage of the technologies in Microsoft's analytical ecosystem to modernize your data warehouse once you've migrated to Azure Synapse. + +## Offload data staging and ETL processing to Azure Data Lake and Azure Data Factory + +Enterprises today have a key problem resulting from digital transformation. So much new data is being generated and captured for analysis, and much of this data is finding its way into data warehouses. A good example is transaction data created by opening online transaction processing (OLTP) systems to self-service access from mobile devices. These OLTP systems are the main sources of data to a data warehouse, and with customers now driving the transaction rate rather than employees, data in data warehouse staging tables has been growing rapidly in volume. + +This, along with other new data—like Internet of Things (IoT) data, coming into the enterprise, means that companies need to find a way to deal with unprecedented data growth and scale data integration ETL processing beyond current levels. One way to do this is to offload ingestion, data cleansing, transformation and integration to a data lake and process it at scale there, as part of a data warehouse modernization program. 
+
+Once you've migrated your data warehouse to Azure Synapse, Microsoft provides the ability to modernize your ETL processing by ingesting data into, and staging data in, Azure Data Lake Storage. You can then clean, transform, and integrate your data at scale using Data Factory before loading it into Azure Synapse in parallel using PolyBase.
+
+For ELT strategies, consider offloading ELT processing to Azure Data Lake to easily scale as your data volume or frequency grows.
+
+### Microsoft Azure Data Factory
+
+> [!TIP]
+> Data Factory allows you to build scalable data integration pipelines code-free.
+
+[Microsoft Azure Data Factory](https://azure.microsoft.com/services/data-factory/) is a pay-as-you-use, hybrid data integration service for highly scalable ETL and ELT processing. Data Factory provides a simple web-based user interface to build data integration pipelines in a code-free manner, and lets you:
+
+- Easily acquire data at scale. Pay only for what you use, and connect to on-premises, cloud, and SaaS-based data sources.
+
+- Ingest, move, clean, transform, integrate, and analyze cloud and on-premises data at scale, and take automatic action, such as a recommendation or an alert.
+
+- Seamlessly author, monitor, and manage pipelines that span data stores both on-premises and in the cloud.
+
+- Enable pay-as-you-go scale-out in alignment with customer growth.
+
+> [!TIP]
+> Data Factory can connect to on-premises, cloud, and SaaS data.
+
+All of this can be done without writing any code. However, adding custom code to Data Factory pipelines is also supported. The next screenshot shows an example Data Factory pipeline.
+
+:::image type="content" source="../media/7-beyond-data-warehouse-migration/azure-data-factory-pipeline.png" border="true" alt-text="Screenshot showing an example of an Azure Data Factory pipeline.":::
+
+> [!TIP]
+> Data Factory pipelines control the integration and analysis of data. Data Factory is enterprise-class data integration software aimed at IT professionals, with a data wrangling facility for business users.
+
+Implement Data Factory pipeline development from any of several places, including:
+
+- Microsoft Azure portal
+
+- Microsoft Azure PowerShell
+
+- Programmatically from .NET and Python using a multi-language SDK
+
+- Azure Resource Manager (ARM) templates
+
+- REST APIs
+
+Developers and data scientists who prefer to write code can easily author Data Factory pipelines in Java, Python, and .NET using the software development kits (SDKs) available for those programming languages. Data Factory pipelines can also be hybrid, because they can connect, ingest, clean, transform, and analyze data in on-premises data centers, Microsoft Azure, other clouds, and SaaS offerings.
+
+Once you develop Data Factory pipelines to integrate and analyze data, you can deploy those pipelines globally and schedule them to run in batch, invoke them on demand as a service, or run them in real time on an event-driven basis. A Data Factory pipeline can also run on one or more execution engines and monitor pipeline execution to ensure performance and track errors.
+ +Data Factory can support multiple use cases, including: + +- Preparing, integrating, and enriching data from cloud and on-premises data sources to populate your migrated data warehouse and data marts on Microsoft Azure Synapse. + +- Preparing, integrating, and enriching data from cloud and on-premises data sources to produce training data for use in machine learning model development and in retraining analytical models. + +- Orchestrating data preparation and analytics to create predictive and prescriptive analytical pipelines for processing and analyzing data in batch, such as sentiment analytics, and either acting on the results of the analysis or populating your data warehouse with the results. + +- Preparing, integrating, and enriching data for data-driven business applications running on the Azure cloud on top of operational data stores like Azure Cosmos DB. + +#### Data sources + +Data Factory lets you connect with [connectors](../../../data-factory/connector-overview.md) from both cloud and on-premises data sources. Agent software, known as a Self-Hosted Integration Runtime, securely accesses on-premises data sources and supports secure, scalable data transfer. + +#### Transform data using Azure Data Factory + +> [!TIP] +> Professional ETL developers can use Azure Data Factory mapping data flows to clean, transform and integrate data without the need to write code. + +Within a Data Factory pipeline, ingest, clean, transform, integrate, and, if necessary, analyze any type of data from these sources. This includes structured, semi-structured—such as JSON or Avro—and unstructured data. + +Professional ETL developers can use Data Factory mapping data flows to filter, split, join (many types), lookup, pivot, unpivot, sort, union, and aggregate data without writing any code. In addition, Data Factory supports surrogate keys, multiple write processing options such as insert, upsert, update, table recreation, and table truncation, and several types of target data stores—also known as sinks. ETL developers can also create aggregations, including time series aggregations that require a window to be placed on data columns. + +> [!TIP] +> Data Factory supports the ability to automatically detect and manage schema changes in inbound data, such as in streaming data. + +Run mapping data flows that transform data as activities in a Data Factory pipeline. Include multiple mapping data flows in a single pipeline, if necessary. Break up challenging data transformation and integration tasks into smaller mapping dataflows that can be combined to handle the complexity and custom code added if necessary. In addition to this functionality, Data Factory mapping data flows include these abilities: + +- Define expressions to clean and transform data, compute aggregations, and enrich data. For example, these expressions can perform feature engineering on a date field to break it into multiple fields to create training data during machine learning model development. Construct expressions from a rich set of functions that include mathematical, temporal, split, merge, string concatenation, conditions, pattern match, replace, and many other functions. + +- Automatically handle schema drift so that data transformation pipelines can avoid being impacted by schema changes in data sources. This is especially important for streaming IoT data, where schema changes can happen without notice when devices are upgraded or when readings are missed by gateway devices collecting IoT data. 
+ +- Partition data to enable transformations to run in parallel at scale. + +- Inspect data to view the metadata of a stream you're transforming. + +> [!TIP] +> Data Factory can also partition data to enable ETL processing to run at scale. + +The next screenshot shows an example Data Factory mapping data flow. + +:::image type="content" source="../media/6-microsoft-3rd-party-migration-tools/azure-data-factory-mapping-dataflows.png" border="true" alt-text="Screenshot showing an example of an Azure Data Factory mapping dataflow." lightbox="../media/6-microsoft-3rd-party-migration-tools/azure-data-factory-mapping-dataflows-lrg.png"::: + +Data engineers can profile data quality and view the results of individual data transforms by switching on a debug capability during development. + +> [!TIP] +> Data Factory pipelines are also extensible since Data Factory allows you to write your own code and run it as part of a pipeline. + +Extend Data Factory transformational and analytical functionality by adding a linked service containing your own code into a pipeline. For example, an Azure Synapse Spark Pool notebook containing Python code could use a trained model to score the data integrated by a mapping data flow. + +Store integrated data and any results from analytics included in a Data Factory pipeline in one or more data stores such as Azure Data Lake storage, Azure Synapse, or Azure HDInsight (Hive Tables). Invoke other activities to act on insights produced by a Data Factory analytical pipeline. + +#### Utilize Spark to scale data integration + +Under the covers, Data Factory utilizes Azure Synapse Spark Pools—Microsoft's Spark-as-a-service offering—at run time to clean and integrate data on the Microsoft Azure cloud. This enables it to clean, integrate, and analyze high-volume and very high-velocity data (such as click stream data) at scale. Microsoft intends to execute Data Factory pipelines on other Spark distributions. In addition to executing ETL jobs on Spark, Data Factory can also invoke Pig scripts and Hive queries to access and transform data stored in Azure HDInsight. + +#### Link self-service data prep and Data Factory ETL processing using wrangling data flows + +> [!TIP] +> Data Factory support for wrangling data flows in addition to mapping data flows means that business and IT can work together on a common platform to integrate data. + +Another new capability in Data Factory is wrangling data flows. This lets business users (also known as citizen data integrators and data engineers) make use of the platform to visually discover, explore and prepare data at scale without writing code. This easy-to-use Data Factory capability is similar to Microsoft Excel Power Query or Microsoft Power BI Dataflows, where self-service data preparation business users use a spreadsheet-style UI with drop-down transforms to prepare and integrate data. The following screenshot shows an example Data Factory wrangling data flow. + +:::image type="content" source="../media/6-microsoft-3rd-party-migration-tools/azure-data-factory-wrangling-dataflows.png" border="true" alt-text="Screenshot showing an example of Azure Data Factory wrangling dataflows."::: + +This differs from Excel and Power BI, as Data Factory wrangling data flows uses Power Query Online to generate M code and translate it into a massively parallel in-memory Spark job for cloud scale execution. 
The combination of mapping data flows and wrangling data flows in Data Factory lets IT professional ETL developers and business users collaborate to prepare, integrate, and analyze data for a common business purpose. The preceding Data Factory mapping data flow diagram shows how both Data Factory and Azure Synapse Spark Pool Notebooks can be combined in the same Data Factory pipeline. This allows IT and business to be aware of what each has created. Mapping data flows and wrangling data flows can then be available for reuse to maximize productivity and consistency and minimize reinvention. + +#### Link data and analytics in analytical pipelines + +In addition to cleaning and transforming data, Data Factory can combine data integration and analytics in the same pipeline. Use Data Factory to create both data integration and analytical pipelines—the latter being an extension of the former. Drop an analytical model into a pipeline so that clean, integrated data can be stored to provide predictions or recommendations. Act on this information immediately or store it in your data warehouse to provide you with new insights and recommendations that can be viewed in BI tools. + +Models developed code-free with Azure ML Studio, Azure Machine Learning Service SDK using Azure Synapse Spark Pool Notebooks, or using R in RStudio, can be invoked as a service from within a Data Factory pipeline to batch score your data. Analysis happens at scale by executing Spark machine learning pipelines on Azure Synapse Spark Pool Notebooks. + +Store integrated data and any results from analytics included in a Data Factory pipeline in one or more data stores, such as Azure Data Lake storage, Azure Synapse, or Azure HDInsight (Hive Tables). Invoke other activities to act on insights produced by a Data Factory analytical pipeline. + +## A lake database to share consistent trusted data + +> [!TIP] +> Microsoft has created a lake database to describe core data entities to be shared across the enterprise. + +A key objective in any data integration set-up is the ability to integrate data once and reuse it everywhere, not just in a data warehouse—for example, in data science. Reuse avoids reinvention and ensures consistent, commonly understood data that everyone can trust. + +> [!TIP] +> Azure Data Lake is shared storage that underpins Microsoft Azure Synapse, Azure ML, Azure Synapse Spark, and Azure HDInsight. + +To achieve this goal, establish a set of common data names and definitions describing logical data entities that need to be shared across the enterprise—such as customer, account, product, supplier, orders, payments, returns, and so forth. Once this is done, IT and business professionals can use data integration software to create these common data assets and store them to maximize their reuse to drive consistency everywhere. + +> [!TIP] +> Integrating data to create lake database logical entities in shared storage enables maximum reuse of common data assets. + +Microsoft has done this by creating a [lake database](../../database-designer/concepts-lake-database.md). The lake database is a common language for business entities that represents commonly used concepts and activities across a business. Azure Synapse Analytics provides industry specific database templates to help standardize data in the lake. [Lake database templates](../../database-designer/concepts-database-templates.md) provide schemas for predefined business areas, enabling data to the loaded into a lake database in a structured way. 
+
+The power comes when data integration software is used to create lake database common data assets. This results in self-describing trusted data that can be consumed by applications and analytical systems. Create a lake database in Azure Data Lake Storage using Azure Data Factory, and consume it with Power BI, Azure Synapse Spark, Azure Synapse, and Azure ML. The following diagram shows a lake database used in Azure Synapse Analytics.
+
+:::image type="content" source="../media/7-beyond-data-warehouse-migration/azure-synapse-analytics-lake-database.png" border="true" alt-text="Screenshot showing how a lake database can be used in Azure Synapse Analytics.":::
+
+## Integration with Microsoft data science technologies on Azure
+
+Another key requirement in modernizing your migrated data warehouse is to integrate it with Microsoft and third-party data science technologies on Azure to produce insights for competitive advantage. Let's look at what Microsoft offers in terms of machine learning and data science technologies and see how these can be used with Azure Synapse in a modern data warehouse environment.
+
+### Microsoft technologies for data science on Azure
+
+> [!TIP]
+> Develop machine learning models using a no-code/low-code approach or programming languages like Python, R, and .NET.
+
+Microsoft offers a range of technologies to build predictive analytical models using machine learning, analyze unstructured data using deep learning, and perform other kinds of advanced analytics. These include:
+
+- Azure ML Studio
+
+- Azure Machine Learning Service
+
+- Azure Synapse Spark Pool Notebooks
+
+- ML.NET (API, CLI, or .NET Model Builder for Visual Studio)
+
+- Visual Studio .NET for Apache Spark
+
+Data scientists can use RStudio (R) and Jupyter Notebooks (Python) to develop analytical models, or they can use other frameworks such as Keras or TensorFlow.
+
+#### Azure ML Studio
+
+Azure ML Studio is a fully managed cloud service that lets you easily build, deploy, and share predictive analytics via a drag-and-drop web-based user interface. The next screenshot shows the Azure Machine Learning studio user interface.
+
+:::image type="content" source="../media/7-beyond-data-warehouse-migration/azure-ml-studio-ui.png" border="true" alt-text="Screenshot showing predictive analysis in the Azure Machine Learning studio user interface.":::
+
+#### Azure Machine Learning Service
+
+> [!TIP]
+> Azure Machine Learning Service provides an SDK for developing machine learning models using several open-source frameworks.
+
+Azure Machine Learning Service provides a software development kit (SDK) and services for Python to quickly prepare data, and to train and deploy machine learning models. Use Azure Machine Learning Service from Azure notebooks (a Jupyter notebook service) and utilize open-source frameworks, such as PyTorch, TensorFlow, Spark MLlib (Azure Synapse Spark Pool Notebooks), or scikit-learn. Azure Machine Learning Service provides an AutoML capability that automatically identifies the most accurate algorithms to expedite model development. You can also use it to build machine learning pipelines that manage end-to-end workflow, programmatically scale on the cloud, and deploy models both to the cloud and the edge. Azure Machine Learning Service uses logical containers called workspaces, which can be created either manually from the Azure portal or programmatically.
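+
+For example, a workspace might be created programmatically with the Python SDK. The following is a minimal sketch using the v1 `azureml-core` package, which corresponds to the Azure Machine Learning Service SDK described here; the workspace name, subscription ID, resource group, and region are placeholders.
+
+```python
+# Minimal sketch of programmatic workspace creation with the azureml-core (v1) SDK.
+# The name, subscription ID, resource group, and region below are placeholders.
+from azureml.core import Workspace
+
+ws = Workspace.create(
+    name="contoso-ml-workspace",
+    subscription_id="<your-subscription-id>",
+    resource_group="contoso-analytics-rg",
+    create_resource_group=True,
+    location="westeurope",
+)
+
+# Persist workspace details locally so later scripts can reconnect with Workspace.from_config()
+ws.write_config()
+print(ws.name, ws.location, ws.resource_group)
+```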
+
+These workspaces keep compute targets, experiments, data stores, trained machine learning models, Docker images, and deployed services all in one place to enable teams to work together. Use Azure Machine Learning Service from Visual Studio with the Visual Studio Tools for AI extension.
+
+> [!TIP]
+> Organize and manage related data stores, experiments, trained models, Docker images, and deployed services in workspaces.
+
+#### Azure Synapse Spark Pool Notebooks
+
+> [!TIP]
+> Azure Synapse Spark is Microsoft's dynamically scalable Spark-as-a-service offering, providing scalable execution of data preparation, model development, and deployed model execution.
+
+[Azure Synapse Spark Pool Notebooks](../../spark/apache-spark-development-using-notebooks.md) is an Apache Spark service optimized to run on Azure that:
+
+- Allows data engineers to build and execute scalable data preparation jobs using Azure Data Factory.
+
+- Allows data scientists to build and execute machine learning models at scale using notebooks written in languages such as Scala, R, Python, Java, and SQL, and to visualize results.
+
+> [!TIP]
+> Azure Synapse Spark can access data in a range of Microsoft analytical ecosystem data stores on Azure.
+
+Jobs running in Azure Synapse Spark Pool Notebooks can retrieve, process, and analyze data at scale from Azure Blob Storage, Azure Data Lake Storage, Azure Synapse, Azure HDInsight, and streaming data services such as Kafka.
+
+Autoscaling and auto-termination are also supported to reduce total cost of ownership (TCO). Data scientists can use the MLflow open-source framework to manage the machine learning lifecycle.
+
+#### ML.NET
+
+> [!TIP]
+> Microsoft has extended its machine learning capability to .NET developers.
+
+ML.NET is an open-source, cross-platform machine learning framework (Windows, Linux, macOS), created by Microsoft for .NET developers so that they can use existing tools—like .NET Model Builder for Visual Studio—to develop custom machine learning models and integrate them into .NET applications.
+
+#### Visual Studio .NET for Apache Spark
+
+Visual Studio .NET for Apache® Spark™ aims to make Spark accessible to .NET developers across all Spark APIs. It takes Spark support beyond R, Scala, Python, and Java to .NET. While initially only available on Apache Spark on HDInsight, Microsoft intends to make this available on Azure Synapse Spark Pool Notebooks.
+
+### Utilize Azure Analytics with your data warehouse
+
+> [!TIP]
+> Train, test, evaluate, and execute machine learning models at scale on Azure Synapse Spark Pool Notebooks using data in Azure Synapse.
+
+Combine machine learning models built using these tools with Azure Synapse by:
+
+- Using machine learning models in batch mode or in real time to produce new insights, and adding them to what you already know in Azure Synapse.
+
+- Using the data in Azure Synapse to develop and train new predictive models for deployment elsewhere, such as in other applications.
+
+- Deploying machine learning models—including those trained elsewhere—in Azure Synapse to analyze data in the data warehouse and drive new business value.
+
+> [!TIP]
+> Produce new insights using machine learning on Azure in batch or in real time, and add them to what you know in your data warehouse.
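+
+As a minimal sketch of the first approach, the following PySpark fragment (run from an Azure Synapse Spark Pool notebook) reads a dedicated SQL pool table with the `synapsesql` connector, segments customers with Spark MLlib k-means clustering, and writes the segments back to the warehouse. The table and column names and the choice of four segments are illustrative assumptions, and connector authentication options are omitted for brevity.
+
+```python
+# Minimal sketch: segment warehouse customers with k-means and write the result back.
+# Table and column names are hypothetical; connector auth options are omitted for brevity.
+from pyspark.sql import SparkSession
+from pyspark.ml.feature import VectorAssembler
+from pyspark.ml.clustering import KMeans
+
+spark = SparkSession.builder.getOrCreate()  # provided automatically in a Synapse notebook
+
+# Read customer metrics from the dedicated SQL pool via the synapsesql connector
+customers = spark.read.synapsesql("contoso_dw.dbo.CustomerMetrics")
+
+# Assemble numeric features and cluster customers into four illustrative segments
+features = VectorAssembler(
+    inputCols=["TotalSpend", "OrderCount", "DaysSinceLastOrder"],
+    outputCol="features").transform(customers)
+model = KMeans(k=4, featuresCol="features", predictionCol="Segment").fit(features)
+segments = model.transform(features).select("CustomerId", "Segment")
+
+# Write the segments back to the data warehouse as a new internal table
+segments.write.synapsesql("contoso_dw.dbo.CustomerSegments")
+```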
+
+In terms of machine learning model development, data scientists can use RStudio, Jupyter notebooks, and Azure Synapse Spark Pool notebooks together with Microsoft Azure Machine Learning Service to develop machine learning models that run at scale on Azure Synapse Spark Pool Notebooks using data in Azure Synapse. For example, they could create an unsupervised model to segment customers for use in driving different marketing campaigns. Use supervised machine learning to train a model to predict a specific outcome, such as predicting a customer's propensity to churn, or recommending the next best offer for a customer to increase their value. The next diagram shows how Azure Synapse Analytics can be leveraged for machine learning.
+
+:::image type="content" source="../media/7-beyond-data-warehouse-migration/azure-synapse-train-predict.png" border="true" alt-text="Screenshot of an Azure Synapse Analytics train and predict model.":::
+
+In addition, you can ingest big data—such as social network data or review website data—into Azure Data Lake, then prepare and analyze it at scale on Azure Synapse Spark Pool Notebooks, using natural language processing to score sentiment about your products or your brand. Add these scores to your data warehouse to understand the impact of—for example—negative sentiment on product sales, and to leverage big data analytics to add to what you already know in your data warehouse.
+
+## Integrate live streaming data into Azure Synapse Analytics
+
+When analyzing data in a modern data warehouse, you must be able to analyze streaming data in real time and join it with historical data in your data warehouse. An example of this would be combining IoT data with product or asset data.
+
+> [!TIP]
+> Integrate your data warehouse with streaming data from IoT devices or clickstreams.
+
+Once you've successfully migrated your data warehouse to Azure Synapse, you can introduce this capability as part of a data warehouse modernization exercise. Do this by taking advantage of additional functionality in Azure Synapse.
+
+> [!TIP]
+> Ingest streaming data into Azure Data Lake Storage from Azure Event Hubs or Kafka, and access it from Azure Synapse using PolyBase external tables.
+
+To do this, ingest streaming data via Azure Event Hubs or other technologies, such as Kafka, using Azure Data Factory (or using an existing ETL tool if it supports the streaming data sources) and land it in Azure Data Lake Storage (ADLS). Next, create an external table in Azure Synapse using PolyBase and point it at the data being streamed into Azure Data Lake. Your migrated data warehouse will now contain new tables that provide access to real-time streaming data. Query this external table as if the data were in the data warehouse, using standard T-SQL from any BI tool that has access to Azure Synapse. You can also join this data to other tables containing historical data and create views that join live streaming data to historical data to make it easier for business users to access. In the following diagram, a real-time data warehouse on Azure Synapse Analytics is integrated with streaming data in Azure Data Lake.
+
+:::image type="content" source="../media/7-beyond-data-warehouse-migration/azure-datalake-streaming-data.png" border="true" alt-text="Screenshot of Azure Synapse Analytics with streaming data in an Azure Data Lake.":::
+
+## Create a logical data warehouse using PolyBase
+
+> [!TIP]
+> PolyBase simplifies access to multiple underlying analytical data stores on Azure for business users.
+
+PolyBase offers the capability to create a logical data warehouse to simplify user access to multiple analytical data stores.
+
+This is attractive because many companies have adopted 'workload-optimized' analytical data stores over the last several years in addition to their data warehouses. Examples of these platforms on Azure include:
+
+- Azure Data Lake Storage with Azure Synapse Spark Pool Notebooks (Spark-as-a-service), for big data analytics
+
+- Azure HDInsight (Hadoop-as-a-service), also for big data analytics
+
+- NoSQL graph databases for graph analysis, which could be done in Azure Cosmos DB
+
+- Azure Event Hubs and Azure Stream Analytics, for real-time analysis of data in motion
+
+You may have non-Microsoft equivalents of some of these. You may also have a master data management (MDM) system that needs to be accessed for consistent trusted data on customers, suppliers, products, assets, and more.
+
+These additional analytical platforms have emerged because of the explosion of new data sources—both inside and outside the enterprise—that business users want to capture and analyze. Examples include:
+
+- Machine-generated data, such as IoT sensor data and clickstream data.
+
+- Human-generated data, such as social network data, review website data, inbound customer email, images, and video.
+
+- Other external data, such as open government data and weather data.
+
+This data is over and above the structured transaction data and master data sources that typically feed data warehouses. These new data sources include semi-structured data (like JSON, XML, or Avro) or unstructured data (like text, voice, image, or video), which is more complex to process and analyze. This data could be very high volume, high velocity, or both.
+
+As a result, the need for new kinds of more complex analysis has emerged, such as natural language processing, graph analysis, deep learning, streaming analytics, or complex analysis of large volumes of structured data. This kind of analysis typically doesn't happen in a data warehouse, so it's not surprising to see different analytical platforms for different types of analytical workloads, as shown in this diagram.
+
+:::image type="content" source="../media/7-beyond-data-warehouse-migration/analytical-workload-platforms.png" border="true" alt-text="Screenshot of different analytical platforms for different types of analytical workloads in Azure Synapse Analytics.":::
+
+Since these platforms are producing new insights, it's normal to see a requirement to combine these insights with what you already know in Azure Synapse. That's what PolyBase makes possible.
+
+> [!TIP]
+> The ability to make data in multiple analytical data stores look like it's all in one system and join it to Azure Synapse is known as a logical data warehouse architecture.
+
+By leveraging PolyBase data virtualization inside Azure Synapse, you can implement a logical data warehouse.
+Join data in Azure Synapse to data in other Azure and on-premises analytical data stores—like Azure HDInsight or Cosmos DB—or to streaming data flowing into Azure Data Lake Storage from Azure Stream Analytics and Event Hubs. Users access external tables in Azure Synapse, unaware that the data they're accessing is stored in multiple underlying analytical systems. The next diagram shows how a complex data warehouse structure can be accessed through comparatively simpler, but still powerful, user interface methods.
+
+:::image type="content" source="../media/7-beyond-data-warehouse-migration/complex-data-warehouse-structure.png" alt-text="Screenshot showing an example of a complex data warehouse structure accessed through user interface methods.":::
+
+The previous diagram shows how other technologies of the Microsoft analytical ecosystem can be combined with the logical data warehouse architecture capability of Azure Synapse. For example, data can be ingested into Azure Data Lake Storage (ADLS) and curated using Azure Data Factory to create trusted data products that represent Microsoft [lake database](../../database-designer/concepts-lake-database.md) logical data entities. This trusted, commonly understood data can then be consumed and reused in different analytical environments such as Azure Synapse, Azure Synapse Spark Pool Notebooks, or Azure Cosmos DB. All insights produced in these environments are accessible via a logical data warehouse data virtualization layer made possible by PolyBase.
+
+> [!TIP]
+> A logical data warehouse architecture simplifies business user access to data and adds new value to what you already know in your data warehouse.
+
+## Conclusions
+
+> [!TIP]
+> Migrating your data warehouse to Azure Synapse lets you make use of a rich Microsoft analytical ecosystem running on Azure.
+
+Once you migrate your data warehouse to Azure Synapse, you can leverage other technologies in the Microsoft analytical ecosystem. You can not only modernize your data warehouse, but also combine insights produced in other Azure analytical data stores into an integrated analytical architecture.
+
+Broaden your ETL processing to ingest data of any type into Azure Data Lake Storage. Prepare and integrate it at scale using Azure Data Factory to produce trusted, commonly understood data assets that can be consumed by your data warehouse and accessed by data scientists and other applications. Build real-time and batch-oriented analytical pipelines and create machine learning models to run in batch, in real time on streaming data, and on demand as a service.
+
+Leverage PolyBase and `COPY INTO` to go beyond your data warehouse. Simplify access to insights from multiple underlying analytical platforms on Azure by creating holistic integrated views in a logical data warehouse. Easily access streaming, big data, and traditional data warehouse insights from BI tools and applications to drive new value in your business.
+
+## Next steps
+
+To learn more about migrating to a dedicated SQL pool, see [Migrate a data warehouse to a dedicated SQL pool in Azure Synapse Analytics](../migrate-to-synapse-analytics-guide.md).
\ No newline at end of file diff --git a/articles/synapse-analytics/partner/business-intelligence.md b/articles/synapse-analytics/partner/business-intelligence.md index 2cd3cd6b0e97..419ce517f6ee 100644 --- a/articles/synapse-analytics/partner/business-intelligence.md +++ b/articles/synapse-analytics/partner/business-intelligence.md @@ -8,7 +8,7 @@ ms.subservice: sql-dw ms.date: 07/09/2021 author: gillharmeet ms.author: harmeetgill -ms.reviewer: igorstan +ms.reviewer: wiassaf ms.custom: seo-lt-2019 --- diff --git a/articles/synapse-analytics/partner/data-integration.md b/articles/synapse-analytics/partner/data-integration.md index 94fe24ba60ea..20ec38d892b8 100644 --- a/articles/synapse-analytics/partner/data-integration.md +++ b/articles/synapse-analytics/partner/data-integration.md @@ -8,7 +8,7 @@ ms.topic: conceptual ms.subservice: sql-dw ms.date: 03/27/2019 ms.author: harmeetgill -ms.reviewer: igorstan +ms.reviewer: wiassaf ms.custom: seo-lt-2019 --- diff --git a/articles/synapse-analytics/partner/data-management.md b/articles/synapse-analytics/partner/data-management.md index e0970413db9b..e7dd4c268b33 100644 --- a/articles/synapse-analytics/partner/data-management.md +++ b/articles/synapse-analytics/partner/data-management.md @@ -8,7 +8,7 @@ ms.topic: conceptual ms.subservice: sql-dw ms.date: 04/17/2018 ms.author: harmeetgill -ms.reviewer: igorstan +ms.reviewer: wiassaf ms.custom: seo-lt-2019 --- diff --git a/articles/synapse-analytics/spark/synapse-spark-sql-pool-import-export.md b/articles/synapse-analytics/spark/synapse-spark-sql-pool-import-export.md index 557466b445e8..13f0587f0708 100644 --- a/articles/synapse-analytics/spark/synapse-spark-sql-pool-import-export.md +++ b/articles/synapse-analytics/spark/synapse-spark-sql-pool-import-export.md @@ -195,17 +195,22 @@ This section presents reference code templates to describe how to use and invoke #### Read Request - `synapsesql` method signature +##### [Scala](#tab/scala) + ```Scala synapsesql(tableName:String) => org.apache.spark.sql.DataFrame ``` +##### [Python](#tab/python) + ```python synapsesql(table_name: str) -> org.apache.spark.sql.DataFrame ``` +--- #### Read using Azure AD based authentication -##### [Scala](#tab/scala) +##### [Scala](#tab/scala1) ```Scala //Use case is to read data from an internal table in Synapse Dedicated SQL Pool DB @@ -234,7 +239,7 @@ val dfToReadFromTable:DataFrame = spark.read. dfToReadFromTable.show() ``` -##### [Python](#tab/python) +##### [Python](#tab/python1) ```python # Add required imports @@ -261,10 +266,11 @@ dfToReadFromTable = (spark.read # Show contents of the dataframe dfToReadFromTable.show() ``` +--- #### Read using basic authentication -##### [Scala](#tab/scala1) +##### [Scala](#tab/scala2) ```Scala //Use case is to read data from an internal table in Synapse Dedicated SQL Pool DB @@ -298,7 +304,7 @@ val dfToReadFromTable:DataFrame = spark.read. 
dfToReadFromTable.show() ``` -##### [Python](#tab/python1) +##### [Python](#tab/python2) ```python # Add required imports @@ -332,6 +338,7 @@ dfToReadFromTable = (spark.read dfToReadFromTable.show() ``` +--- ### Write to Azure Synapse Dedicated SQL Pool @@ -349,6 +356,8 @@ synapsesql(tableName:String, * Spark Pool Version 3.1.2 +##### [Scala](#tab/scala3) + ```Scala synapsesql(tableName:String, tableType:String = Constants.INTERNAL, @@ -356,15 +365,18 @@ synapsesql(tableName:String, callBackHandle=Option[(Map[String, Any], Option[Throwable])=>Unit]):Unit ``` +##### [Python](#tab/python3) + ```python synapsesql(table_name: str, table_type: str = Constants.INTERNAL, location: str = None) -> None ``` +--- #### Write using Azure AD based authentication Following is a comprehensive code template that describes how to use the Connector for write scenarios: -##### [Scala](#tab/scala2) +##### [Scala](#tab/scala4) ```Scala //Add required imports @@ -423,7 +435,7 @@ readDF. if(errorDuringWrite.isDefined) throw errorDuringWrite.get ``` -##### [Python](#tab/python2) +##### [Python](#tab/python4) ```python @@ -475,12 +487,13 @@ from com.microsoft.spark.sqlanalytics.Constants import Constants "/path/to/external/table")) ``` +--- #### Write using basic authentication Following code snippet replaces the write definition described in the [Write using Azure AD based authentication](#write-using-azure-ad-based-authentication) section, to submit write request using SQL basic authentication approach: -##### [Scala](#tab/scala3) +##### [Scala](#tab/scala5) ```Scala //Define write options to use SQL basic authentication @@ -509,7 +522,7 @@ readDF. callBackHandle = Some(callBackFunctionToReceivePostWriteMetrics)) ``` -##### [Python](#tab/python3) +##### [Python](#tab/python5) ```python # Write using Basic Auth to Internal table @@ -570,6 +583,7 @@ from com.microsoft.spark.sqlanalytics.Constants import Constants "/path/to/external/table")) ``` +--- In a basic authentication approach, in order to read data from a source storage path other configuration options are required. 
Following code snippet provides an example to read from an Azure Data Lake Storage Gen2 data source using Service Principal credentials: @@ -700,9 +714,9 @@ Spark DataFrame's `createOrReplaceTempView` can be used to access data fetched i * Now, change the language preference on the Notebook to `PySpark (Python)` and fetch data from the registered view `` - ```Python +```Python spark.sql("select * from ").show() - ``` +``` ### Response handling diff --git a/articles/synapse-analytics/sql-data-warehouse/cheat-sheet.md b/articles/synapse-analytics/sql-data-warehouse/cheat-sheet.md index f2d4afe47e54..c588de4f5bad 100644 --- a/articles/synapse-analytics/sql-data-warehouse/cheat-sheet.md +++ b/articles/synapse-analytics/sql-data-warehouse/cheat-sheet.md @@ -8,7 +8,7 @@ ms.topic: overview ms.subservice: sql-dw ms.date: 11/04/2019 ms.author: martinle -ms.reviewer: igorstan +ms.reviewer: wiassaf --- # Cheat sheet for dedicated SQL pool (formerly SQL DW) in Azure Synapse Analytic diff --git a/articles/synapse-analytics/sql-data-warehouse/column-level-security.md b/articles/synapse-analytics/sql-data-warehouse/column-level-security.md index 4636f7fc7310..279681726305 100644 --- a/articles/synapse-analytics/sql-data-warehouse/column-level-security.md +++ b/articles/synapse-analytics/sql-data-warehouse/column-level-security.md @@ -8,7 +8,7 @@ ms.topic: conceptual ms.subservice: sql-dw ms.date: 04/19/2020 ms.author: nanditav -ms.reviewer: igorstan +ms.reviewer: wiassaf ms.custom: seo-lt-2019 tags: azure-synapse --- diff --git a/articles/synapse-analytics/sql-data-warehouse/create-data-warehouse-portal.md b/articles/synapse-analytics/sql-data-warehouse/create-data-warehouse-portal.md index a1850c73e8f7..c0d20727f623 100644 --- a/articles/synapse-analytics/sql-data-warehouse/create-data-warehouse-portal.md +++ b/articles/synapse-analytics/sql-data-warehouse/create-data-warehouse-portal.md @@ -4,7 +4,7 @@ description: Create and query a dedicated SQL pool (formerly SQL DW) using the A author: pimorano ms.author: pimorano manager: craigg -ms.reviewer: igorstan +ms.reviewer: wiassaf ms.date: 05/28/2019 ms.topic: quickstart ms.service: synapse-analytics diff --git a/articles/synapse-analytics/sql-data-warehouse/create-data-warehouse-powershell.md b/articles/synapse-analytics/sql-data-warehouse/create-data-warehouse-powershell.md index cd13bc83bacb..2f6d0b6df42f 100644 --- a/articles/synapse-analytics/sql-data-warehouse/create-data-warehouse-powershell.md +++ b/articles/synapse-analytics/sql-data-warehouse/create-data-warehouse-powershell.md @@ -4,7 +4,7 @@ description: Quickly create a dedicated SQL pool (formerly SQL DW) with a server author: joannapea ms.author: joanpo manager: craigg -ms.reviewer: igorstan +ms.reviewer: wiassaf ms.date: 4/11/2019 ms.topic: quickstart ms.service: synapse-analytics diff --git a/articles/synapse-analytics/sql-data-warehouse/design-elt-data-loading.md b/articles/synapse-analytics/sql-data-warehouse/design-elt-data-loading.md index d6d0d9e19309..11926fe1d679 100644 --- a/articles/synapse-analytics/sql-data-warehouse/design-elt-data-loading.md +++ b/articles/synapse-analytics/sql-data-warehouse/design-elt-data-loading.md @@ -8,7 +8,7 @@ ms.topic: conceptual ms.subservice: sql-dw ms.date: 11/20/2020 ms.author: joanpo -ms.reviewer: igorstan +ms.reviewer: wiassaf ms.custom: azure-synapse --- diff --git a/articles/synapse-analytics/sql-data-warehouse/disable-geo-backup.md b/articles/synapse-analytics/sql-data-warehouse/disable-geo-backup.md index 3bdb35a7ea66..ed2facf17a75 100644 
--- a/articles/synapse-analytics/sql-data-warehouse/disable-geo-backup.md +++ b/articles/synapse-analytics/sql-data-warehouse/disable-geo-backup.md @@ -2,13 +2,13 @@ title: Disable geo-backups description: How-to guide for disabling geo-backups for a dedicated SQL pool (formerly SQL DW) in Azure Synapse Analytics author: joannapea -manager: igorstan +manager: ms.service: synapse-analytics ms.topic: conceptual ms.subservice: sql-dw ms.date: 01/06/2021 ms.author: joanpo -ms.reviewer: igorstan +ms.reviewer: wiassaf ms.custom: seo-lt-2019 --- diff --git a/articles/synapse-analytics/sql-data-warehouse/fivetran-quickstart.md b/articles/synapse-analytics/sql-data-warehouse/fivetran-quickstart.md index 434b5d28e5b4..f0053d4be2c9 100644 --- a/articles/synapse-analytics/sql-data-warehouse/fivetran-quickstart.md +++ b/articles/synapse-analytics/sql-data-warehouse/fivetran-quickstart.md @@ -8,7 +8,7 @@ ms.topic: conceptual ms.subservice: sql-dw ms.date: 10/12/2018 ms.author: martinle -ms.reviewer: igorstan +ms.reviewer: wiassaf ms.custom: seo-lt-2019, azure-synapse --- diff --git a/articles/synapse-analytics/sql-data-warehouse/load-data-from-azure-blob-storage-using-copy.md b/articles/synapse-analytics/sql-data-warehouse/load-data-from-azure-blob-storage-using-copy.md index 3bdd62346b25..bc5398c242ac 100644 --- a/articles/synapse-analytics/sql-data-warehouse/load-data-from-azure-blob-storage-using-copy.md +++ b/articles/synapse-analytics/sql-data-warehouse/load-data-from-azure-blob-storage-using-copy.md @@ -8,7 +8,7 @@ ms.topic: conceptual ms.subservice: sql-dw ms.date: 11/23/2020 ms.author: joanpo -ms.reviewer: igorstan +ms.reviewer: wiassaf ms.custom: azure-synapse --- diff --git a/articles/synapse-analytics/sql-data-warehouse/load-data-wideworldimportersdw.md b/articles/synapse-analytics/sql-data-warehouse/load-data-wideworldimportersdw.md index 6ba9bab6aa92..6bc180870a70 100644 --- a/articles/synapse-analytics/sql-data-warehouse/load-data-wideworldimportersdw.md +++ b/articles/synapse-analytics/sql-data-warehouse/load-data-wideworldimportersdw.md @@ -8,7 +8,7 @@ ms.topic: conceptual ms.subservice: sql-dw ms.date: 01/12/2021 ms.author: joanpo -ms.reviewer: igorstan +ms.reviewer: wiassaf ms.custom: seo-lt-2019, synapse-analytics --- diff --git a/articles/synapse-analytics/sql-data-warehouse/manage-compute-with-azure-functions.md b/articles/synapse-analytics/sql-data-warehouse/manage-compute-with-azure-functions.md index 9d238bfb650f..987c0a137760 100644 --- a/articles/synapse-analytics/sql-data-warehouse/manage-compute-with-azure-functions.md +++ b/articles/synapse-analytics/sql-data-warehouse/manage-compute-with-azure-functions.md @@ -8,7 +8,7 @@ ms.topic: conceptual ms.subservice: sql-dw ms.date: 04/27/2018 ms.author: wiassaf -ms.reviewer: igorstan +ms.reviewer: wiassaf ms.custom: seo-lt-2019, azure-synapse --- diff --git a/articles/synapse-analytics/sql-data-warehouse/massively-parallel-processing-mpp-architecture.md b/articles/synapse-analytics/sql-data-warehouse/massively-parallel-processing-mpp-architecture.md index ee11a0dece35..f33b1f7e0550 100644 --- a/articles/synapse-analytics/sql-data-warehouse/massively-parallel-processing-mpp-architecture.md +++ b/articles/synapse-analytics/sql-data-warehouse/massively-parallel-processing-mpp-architecture.md @@ -8,7 +8,7 @@ ms.topic: conceptual ms.subservice: sql-dw ms.date: 11/04/2019 ms.author: martinle -ms.reviewer: igorstan +ms.reviewer: wiassaf --- # Dedicated SQL pool (formerly SQL DW) architecture in Azure Synapse Analytics diff --git 
a/articles/synapse-analytics/sql-data-warehouse/pause-and-resume-compute-portal.md b/articles/synapse-analytics/sql-data-warehouse/pause-and-resume-compute-portal.md index 91c8e1045aed..d9c9abfd1fbc 100644 --- a/articles/synapse-analytics/sql-data-warehouse/pause-and-resume-compute-portal.md +++ b/articles/synapse-analytics/sql-data-warehouse/pause-and-resume-compute-portal.md @@ -4,7 +4,7 @@ description: Use the Azure portal to pause compute for dedicated SQL pool to sav author: WilliamDAssafMSFT ms.author: wiassaf manager: craigg -ms.reviewer: igorstan +ms.reviewer: wiassaf ms.date: 11/23/2020 ms.topic: quickstart ms.service: synapse-analytics diff --git a/articles/synapse-analytics/sql-data-warehouse/pause-and-resume-compute-powershell.md b/articles/synapse-analytics/sql-data-warehouse/pause-and-resume-compute-powershell.md index e9e6938dea12..4cbec486a8ef 100644 --- a/articles/synapse-analytics/sql-data-warehouse/pause-and-resume-compute-powershell.md +++ b/articles/synapse-analytics/sql-data-warehouse/pause-and-resume-compute-powershell.md @@ -4,7 +4,7 @@ description: You can use Azure PowerShell to pause and resume dedicated SQL pool author: WilliamDAssafMSFT ms.author: wiassaf manager: craigg -ms.reviewer: igorstan +ms.reviewer: wiassaf ms.date: 03/20/2019 ms.topic: quickstart ms.service: synapse-analytics diff --git a/articles/synapse-analytics/sql-data-warehouse/performance-tuning-materialized-views.md b/articles/synapse-analytics/sql-data-warehouse/performance-tuning-materialized-views.md index a6e7880e4229..4c3271eee770 100644 --- a/articles/synapse-analytics/sql-data-warehouse/performance-tuning-materialized-views.md +++ b/articles/synapse-analytics/sql-data-warehouse/performance-tuning-materialized-views.md @@ -1,14 +1,13 @@ --- title: Performance tune with materialized views description: Learn about recommendations and considerations you should know as you use materialized views to improve your query performance. 
-author: XiaoyuMSFT -manager: craigg ms.service: synapse-analytics ms.topic: conceptual ms.subservice: sql-dw ms.date: 08/17/2021 +author: XiaoyuMSFT ms.author: xiaoyul -ms.reviewer: nibruno; jrasnick; azure-synapse +ms.reviewer: wiassaf --- # Performance tune with materialized views diff --git a/articles/synapse-analytics/sql-data-warehouse/performance-tuning-ordered-cci.md b/articles/synapse-analytics/sql-data-warehouse/performance-tuning-ordered-cci.md index eb7890c6a97e..4dfebcafa350 100644 --- a/articles/synapse-analytics/sql-data-warehouse/performance-tuning-ordered-cci.md +++ b/articles/synapse-analytics/sql-data-warehouse/performance-tuning-ordered-cci.md @@ -8,7 +8,7 @@ ms.topic: conceptual ms.subservice: sql-dw ms.date: 04/13/2021 ms.author: xiaoyul -ms.reviewer: nibruno; jrasnick +ms.reviewer: nibruno; wiassaf ms.custom: seo-lt-2019, azure-synapse --- diff --git a/articles/synapse-analytics/sql-data-warehouse/quickstart-scale-compute-powershell.md b/articles/synapse-analytics/sql-data-warehouse/quickstart-scale-compute-powershell.md index 0d98b4acd3a2..258b5a786d5f 100644 --- a/articles/synapse-analytics/sql-data-warehouse/quickstart-scale-compute-powershell.md +++ b/articles/synapse-analytics/sql-data-warehouse/quickstart-scale-compute-powershell.md @@ -4,7 +4,7 @@ description: You can scale compute for dedicated SQL pool (formerly SQL DW) usin author: kedodd ms.author: kedodd manager: craigg -ms.reviewer: igorstan +ms.reviewer: wiassaf ms.date: 03/09/2022 ms.topic: quickstart ms.service: synapse-analytics diff --git a/articles/synapse-analytics/sql-data-warehouse/quickstart-scale-compute-tsql.md b/articles/synapse-analytics/sql-data-warehouse/quickstart-scale-compute-tsql.md index 82a23617ef62..7cf88124e09a 100644 --- a/articles/synapse-analytics/sql-data-warehouse/quickstart-scale-compute-tsql.md +++ b/articles/synapse-analytics/sql-data-warehouse/quickstart-scale-compute-tsql.md @@ -8,7 +8,7 @@ ms.service: synapse-analytics ms.topic: quickstart ms.subservice: sql-dw ms.date: 03/09/2022 -ms.reviewer: igorstan +ms.reviewer: wiassaf ms.custom: seo-lt-2019, azure-synapse, mode-other --- diff --git a/articles/synapse-analytics/sql-data-warehouse/single-region-residency.md b/articles/synapse-analytics/sql-data-warehouse/single-region-residency.md index b330b4c16dd6..89d774e16698 100644 --- a/articles/synapse-analytics/sql-data-warehouse/single-region-residency.md +++ b/articles/synapse-analytics/sql-data-warehouse/single-region-residency.md @@ -2,13 +2,13 @@ title: Single region residency description: How-to guide for configuring single region residency for a dedicated SQL pool (formerly SQL DW) in Azure Synapse Analytics author: joannapea -manager: igorstan +manager: ms.service: synapse-analytics ms.topic: conceptual ms.subservice: sql-dw ms.date: 05/15/2021 ms.author: joanpo -ms.reviewer: igorstan +ms.reviewer: wiassaf ms.custom: seo-lt-2019 --- diff --git a/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-authentication.md b/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-authentication.md index 1fba6d6085e6..0c1e0e14b763 100644 --- a/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-authentication.md +++ b/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-authentication.md @@ -8,7 +8,7 @@ ms.topic: conceptual ms.subservice: sql-dw ms.date: 04/02/2019 ms.author: nanditav -ms.reviewer: igorstan +ms.reviewer: wiassaf ms.custom: seo-lt-2019 tag: azure-synapse --- diff --git 
a/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-concept-recommendations.md b/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-concept-recommendations.md index e533581ffd52..88ebf34b48d5 100644 --- a/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-concept-recommendations.md +++ b/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-concept-recommendations.md @@ -8,7 +8,7 @@ ms.topic: conceptual ms.subservice: sql-dw ms.date: 06/26/2020 ms.author: wiassaf -ms.reviewer: igorstan +ms.reviewer: wiassaf ms.custom: azure-synapse --- diff --git a/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-connect-overview.md b/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-connect-overview.md index 451f3b411f06..feeeed5f0a51 100644 --- a/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-connect-overview.md +++ b/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-connect-overview.md @@ -8,7 +8,7 @@ ms.topic: conceptual ms.subservice: sql-dw ms.date: 04/17/2018 ms.author: joanpo -ms.reviewer: igorstan +ms.reviewer: wiassaf ms.custom: azure-synapse, seo-lt-2019, devx-track-csharp --- diff --git a/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-connection-strings.md b/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-connection-strings.md index 8dd395f7642c..38de137294cf 100644 --- a/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-connection-strings.md +++ b/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-connection-strings.md @@ -8,7 +8,7 @@ ms.topic: conceptual ms.subservice: sql-dw ms.date: 04/17/2018 ms.author: joanpo -ms.reviewer: igorstan +ms.reviewer: wiassaf ms.custom: azure-synapse, seo-lt-2019, devx-track-csharp --- diff --git a/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-continuous-integration-and-deployment.md b/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-continuous-integration-and-deployment.md index 522cf6f7b0ce..8d35658d664c 100644 --- a/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-continuous-integration-and-deployment.md +++ b/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-continuous-integration-and-deployment.md @@ -8,7 +8,7 @@ ms.topic: how-to ms.subservice: sql-dw ms.date: 02/04/2020 ms.author: wiassaf -ms.reviewer: igorstan +ms.reviewer: wiassaf ms.custom: azure-synapse --- # Continuous integration and deployment for dedicated SQL pool in Azure Synapse Analytics diff --git a/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-develop-best-practices-transactions.md b/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-develop-best-practices-transactions.md index 39c0e76513a5..22fb728e6636 100644 --- a/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-develop-best-practices-transactions.md +++ b/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-develop-best-practices-transactions.md @@ -1,14 +1,13 @@ --- title: Optimizing transactions -description: Learn how to optimize the performance of your transactional code in dedicated SQL pool while minimizing risk for long rollbacks. -author: XiaoyuMSFT -manager: craigg +description: Learn how to optimize the performance of your transactional code in an Azure Synapse Analytics dedicated SQL pool while minimizing risk for long rollbacks. 
ms.service: synapse-analytics ms.topic: conceptual ms.subservice: sql-dw ms.date: 04/19/2018 -ms.author: xiaoyul -ms.reviewer: igorstan +author: KevinConanMSFT +ms.author: kecona +ms.reviewer: wiassaf ms.custom: seo-lt-2019, azure-synapse --- diff --git a/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-develop-ctas.md b/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-develop-ctas.md index 07bdf2ff25fc..5077463e4d4d 100644 --- a/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-develop-ctas.md +++ b/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-develop-ctas.md @@ -8,7 +8,7 @@ ms.topic: conceptual ms.subservice: sql-dw ms.date: 03/26/2019 ms.author: joanpo -ms.reviewer: igorstan +ms.reviewer: wiassaf ms.custom: seoapril2019, azure-synapse --- diff --git a/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-develop-dynamic-sql.md b/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-develop-dynamic-sql.md index 48177b18fc62..542dab6a25e8 100644 --- a/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-develop-dynamic-sql.md +++ b/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-develop-dynamic-sql.md @@ -8,7 +8,7 @@ ms.topic: conceptual ms.subservice: sql-dw ms.date: 04/17/2018 ms.author: emtehran -ms.reviewer: igorstan +ms.reviewer: wiassaf ms.custom: seo-lt-2019, azure-synapse --- diff --git a/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-develop-group-by-options.md b/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-develop-group-by-options.md index 27ff082c61db..f41365def64b 100644 --- a/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-develop-group-by-options.md +++ b/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-develop-group-by-options.md @@ -8,7 +8,7 @@ ms.topic: conceptual ms.subservice: sql-dw ms.date: 04/17/2018 ms.author: emtehran -ms.reviewer: igorstan +ms.reviewer: wiassaf ms.custom: seo-lt-2019, azure-synapse --- diff --git a/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-develop-label.md b/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-develop-label.md index 49ace451670a..4f70b8f8912f 100644 --- a/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-develop-label.md +++ b/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-develop-label.md @@ -8,7 +8,7 @@ ms.topic: conceptual ms.subservice: sql-dw ms.date: 04/17/2018 ms.author: emtehran -ms.reviewer: igorstan +ms.reviewer: wiassaf ms.custom: seo-lt-2019, azure-synapse --- diff --git a/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-develop-loops.md b/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-develop-loops.md index 7007c973e472..91af02eb4bd9 100644 --- a/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-develop-loops.md +++ b/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-develop-loops.md @@ -8,7 +8,7 @@ ms.topic: conceptual ms.subservice: sql-dw ms.date: 04/17/2018 ms.author: emtehran -ms.reviewer: igorstan +ms.reviewer: wiassaf ms.custom: seo-lt-2019, azure-synapse --- diff --git a/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-develop-stored-procedures.md b/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-develop-stored-procedures.md index 300138cd6fb3..710239317c37 100644 --- a/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-develop-stored-procedures.md 
+++ b/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-develop-stored-procedures.md @@ -8,7 +8,7 @@ ms.topic: conceptual ms.subservice: sql-dw ms.date: 04/02/2019 ms.author: emtehran -ms.reviewer: igorstan +ms.reviewer: wiassaf ms.custom: azure-synapse --- diff --git a/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-develop-transactions.md b/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-develop-transactions.md index 159ec9812462..0a95f380a24a 100644 --- a/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-develop-transactions.md +++ b/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-develop-transactions.md @@ -1,15 +1,14 @@ --- title: Use transactions in Azure Synapse Analytics SQL pool description: This article includes tips for implementing transactions and developing solutions in Synapse SQL pool. -author: XiaoyuMSFT -manager: craigg ms.service: synapse-analytics ms.topic: conceptual ms.subservice: sql-dw ms.date: 03/22/2019 -ms.author: xiaoyul +author: KevinConanMSFT +ms.author: kecona ms.custom: azure-synapse -ms.reviewer: igorstan +ms.reviewer: wiassaf --- # Use transactions in a SQL pool in Azure Synapse diff --git a/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-develop-user-defined-schemas.md b/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-develop-user-defined-schemas.md index 17847af3c429..020f29af4ca4 100644 --- a/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-develop-user-defined-schemas.md +++ b/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-develop-user-defined-schemas.md @@ -8,7 +8,7 @@ ms.topic: conceptual ms.subservice: sql-dw ms.date: 04/17/2018 ms.author: nanditav -ms.reviewer: igorstan +ms.reviewer: wiassaf ms.custom: seo-lt-2019, azure-synapse --- diff --git a/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-develop-variable-assignment.md b/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-develop-variable-assignment.md index 87e5a0bb4cfb..d1b964a87000 100644 --- a/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-develop-variable-assignment.md +++ b/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-develop-variable-assignment.md @@ -8,7 +8,7 @@ ms.topic: conceptual ms.subservice: sql-dw ms.date: 04/17/2018 ms.author: emtehran -ms.reviewer: igorstan +ms.reviewer: wiassaf ms.custom: seo-lt-2019, azure-synapse --- diff --git a/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-get-started-analyze-with-azure-machine-learning.md b/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-get-started-analyze-with-azure-machine-learning.md index ad201b586b7c..2e3a11e20c41 100644 --- a/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-get-started-analyze-with-azure-machine-learning.md +++ b/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-get-started-analyze-with-azure-machine-learning.md @@ -8,7 +8,7 @@ ms.topic: conceptual ms.subservice: machine-learning ms.date: 07/15/2020 ms.author: martinle -ms.reviewer: igorstan +ms.reviewer: wiassaf ms.custom: seo-lt-2019 tag: azure-Synapse --- diff --git a/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-get-started-connect-sqlcmd.md b/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-get-started-connect-sqlcmd.md index b8857ca00ab6..55f86140342c 100644 --- 
a/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-get-started-connect-sqlcmd.md +++ b/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-get-started-connect-sqlcmd.md @@ -8,7 +8,7 @@ ms.topic: conceptual ms.subservice: sql-dw ms.date: 04/17/2018 ms.author: joanpo -ms.reviewer: igorstan +ms.reviewer: wiassaf ms.custom: seo-lt-2019, azure-synapse --- diff --git a/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-get-started-create-support-ticket.md b/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-get-started-create-support-ticket.md index 739b9b6b7942..7d76f21bdc83 100644 --- a/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-get-started-create-support-ticket.md +++ b/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-get-started-create-support-ticket.md @@ -7,7 +7,7 @@ ms.subservice: sql-dw ms.date: 03/10/2020 author: WilliamDAssafMSFT ms.author: wiassaf -ms.reviewer: igorstan +ms.reviewer: wiassaf ms.custom: seo-lt-2019, azure-synapse --- diff --git a/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-how-to-convert-resource-classes-workload-groups.md b/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-how-to-convert-resource-classes-workload-groups.md index da84e23db330..e1b1d84312dc 100644 --- a/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-how-to-convert-resource-classes-workload-groups.md +++ b/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-how-to-convert-resource-classes-workload-groups.md @@ -8,7 +8,7 @@ ms.subservice: sql-dw ms.topic: conceptual ms.date: 08/13/2020 ms.author: rortloff -ms.reviewer: igorstan +ms.reviewer: wiassaf ms.custom: seo-lt-2019 --- diff --git a/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-how-to-monitor-cache.md b/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-how-to-monitor-cache.md index cff78a90b260..bb56ff4f15b9 100644 --- a/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-how-to-monitor-cache.md +++ b/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-how-to-monitor-cache.md @@ -8,7 +8,7 @@ ms.subservice: sql-dw ms.topic: conceptual ms.date: 11/20/2020 ms.author: wiassaf -ms.reviewer: igorstan +ms.reviewer: wiassaf ms.custom: azure-synapse --- diff --git a/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-install-visual-studio.md b/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-install-visual-studio.md index 6dbffff60718..835ef4cfb357 100644 --- a/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-install-visual-studio.md +++ b/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-install-visual-studio.md @@ -10,7 +10,7 @@ ms.topic: conceptual ms.subservice: sql-dw ms.date: 05/11/2020 ms.author: wiassaf -ms.reviewer: igorstan +ms.reviewer: wiassaf --- # Getting started with Visual Studio 2019 diff --git a/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-integrate-azure-stream-analytics.md b/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-integrate-azure-stream-analytics.md index d0ec3f524907..1b7d59d65fd4 100644 --- a/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-integrate-azure-stream-analytics.md +++ b/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-integrate-azure-stream-analytics.md @@ -8,7 +8,7 @@ ms.topic: conceptual ms.subservice: sql-dw ms.date: 9/25/2020 ms.author: wiassaf -ms.reviewer: igorstan 
+ms.reviewer: wiassaf ms.custom: azure-synapse --- diff --git a/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-load-from-azure-blob-storage-with-polybase.md b/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-load-from-azure-blob-storage-with-polybase.md index 06106bb43857..0dd6fb6f8755 100644 --- a/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-load-from-azure-blob-storage-with-polybase.md +++ b/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-load-from-azure-blob-storage-with-polybase.md @@ -8,7 +8,7 @@ ms.topic: conceptual ms.subservice: sql-dw ms.date: 11/20/2020 ms.author: wiassaf -ms.reviewer: igorstan +ms.reviewer: wiassaf ms.custom: seo-lt-2019 --- diff --git a/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-load-from-azure-data-lake-store.md b/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-load-from-azure-data-lake-store.md index a758172076a2..3cf7bbf6ed7f 100644 --- a/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-load-from-azure-data-lake-store.md +++ b/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-load-from-azure-data-lake-store.md @@ -8,7 +8,7 @@ ms.topic: conceptual ms.subservice: sql-dw ms.date: 11/20/2020 ms.author: wiassaf -ms.reviewer: igorstan +ms.reviewer: wiassaf ms.custom: azure-synapse --- diff --git a/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-manage-compute-overview.md b/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-manage-compute-overview.md index 09a8c5d7b350..9b6f166bf1e6 100644 --- a/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-manage-compute-overview.md +++ b/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-manage-compute-overview.md @@ -8,7 +8,7 @@ ms.topic: conceptual ms.subservice: sql-dw ms.date: 11/12/2019 ms.author: rortloff -ms.reviewer: igorstan +ms.reviewer: wiassaf ms.custom: seo-lt-2019, azure-synapse --- diff --git a/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-manage-compute-rest-api.md b/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-manage-compute-rest-api.md index 872da8ce051b..368fcaff7c46 100644 --- a/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-manage-compute-rest-api.md +++ b/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-manage-compute-rest-api.md @@ -8,7 +8,7 @@ ms.service: synapse-analytics ms.topic: conceptual ms.subservice: sql-dw ms.date: 03/09/2022 -ms.reviewer: igorstan +ms.reviewer: wiassaf ms.custom: seo-lt-2019, azure-synapse --- diff --git a/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-manage-monitor.md b/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-manage-monitor.md index e9e0b2a7ce86..1b2277931938 100644 --- a/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-manage-monitor.md +++ b/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-manage-monitor.md @@ -8,7 +8,7 @@ ms.topic: conceptual ms.subservice: sql-dw ms.date: 11/15/2021 ms.author: rortloff -ms.reviewer: igorstan +ms.reviewer: wiassaf ms.custom: synapse-analytics --- diff --git a/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-overview-develop.md b/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-overview-develop.md index 8ba4c91340f1..20da0477a397 100644 --- a/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-overview-develop.md +++ 
b/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-overview-develop.md @@ -8,7 +8,7 @@ ms.topic: conceptual ms.subservice: sql-dw ms.date: 08/29/2018 ms.author: xiaoyul -ms.reviewer: igorstan +ms.reviewer: wiassaf --- # Design decisions and coding techniques for a dedicated SQL pool (formerly SQL DW) in Azure Synapse Analytics diff --git a/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-overview-faq.yml b/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-overview-faq.yml index 8a877363d0b1..48b0b0da32c9 100644 --- a/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-overview-faq.yml +++ b/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-overview-faq.yml @@ -8,7 +8,7 @@ metadata: ms.subservice: sql-dw ms.date: 11/04/2019 ms.author: martinle - ms.reviewer: igorstan + ms.reviewer: wiassaf title: Dedicated SQL pool (formerly SQL DW) in Azure Synapse Analytics frequently asked questions summary: | diff --git a/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-overview-integrate.md b/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-overview-integrate.md index bc31af25b877..ff005451b6ee 100644 --- a/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-overview-integrate.md +++ b/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-overview-integrate.md @@ -8,7 +8,7 @@ ms.topic: conceptual ms.subservice: sql-dw ms.date: 04/17/2018 ms.author: martinle -ms.reviewer: igorstan +ms.reviewer: wiassaf ms.custom: seo-lt-2019 --- diff --git a/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-overview-manage-security.md b/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-overview-manage-security.md index 9c26df9058ba..d8983bb96601 100644 --- a/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-overview-manage-security.md +++ b/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-overview-manage-security.md @@ -8,7 +8,7 @@ ms.topic: conceptual ms.subservice: sql-dw ms.date: 04/17/2018 ms.author: nanditav -ms.reviewer: igorstan +ms.reviewer: wiassaf ms.custom: seo-lt-2019 tags: azure-synapse --- diff --git a/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-overview-manageability-monitoring.md b/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-overview-manageability-monitoring.md index 2ebcd896e438..41d16c2ec904 100644 --- a/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-overview-manageability-monitoring.md +++ b/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-overview-manageability-monitoring.md @@ -8,7 +8,7 @@ ms.topic: conceptual ms.subservice: sql-dw ms.date: 08/27/2018 ms.author: wiassaf -ms.reviewer: igorstan +ms.reviewer: wiassaf ms.custom: seo-lt-2019 --- diff --git a/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-query-ssms.md b/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-query-ssms.md index 5e642771d282..7ecf39f03138 100644 --- a/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-query-ssms.md +++ b/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-query-ssms.md @@ -1,14 +1,13 @@ --- title: Connect to dedicated SQL pool (formerly SQL DW) with SSMS description: Use SQL Server Management Studio (SSMS) to connect to and query a dedicated SQL pool (formerly SQL DW) in Azure Synapse Analytics. 
-author: XiaoyuMSFT -manager: craigg ms.service: synapse-analytics ms.topic: conceptual ms.subservice: sql-dw ms.date: 04/17/2018 -ms.author: xiaoyul -ms.reviewer: igorstan +author: nanditavalsan +ms.author: nanditav +ms.reviewer: wiassaf ms.custom: seo-lt-2019 --- diff --git a/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-query-visual-studio.md b/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-query-visual-studio.md index 3de06db12c45..3203cff3642f 100644 --- a/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-query-visual-studio.md +++ b/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-query-visual-studio.md @@ -8,7 +8,7 @@ ms.topic: conceptual ms.subservice: sql-dw ms.date: 08/15/2019 ms.author: wiassaf -ms.reviewer: igorstan +ms.reviewer: wiassaf ms.custom: seo-lt-2019 --- diff --git a/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-reference-collation-types.md b/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-reference-collation-types.md index 519398e1f957..9d780389af41 100644 --- a/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-reference-collation-types.md +++ b/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-reference-collation-types.md @@ -2,7 +2,7 @@ title: Data warehouse collation types description: Collation types supported for dedicated SQL pool (formerly SQL DW) in Azure Synapse Analytics. author: jasonwhowell -manager: igorstan +manager: ms.service: synapse-analytics ms.subservice: sql ms.topic: conceptual diff --git a/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-reference-powershell-cmdlets.md b/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-reference-powershell-cmdlets.md index 39a72873b7c6..11777573f723 100644 --- a/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-reference-powershell-cmdlets.md +++ b/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-reference-powershell-cmdlets.md @@ -8,7 +8,7 @@ ms.topic: conceptual ms.subservice: sql-dw ms.date: 04/17/2018 ms.author: wiassaf -ms.reviewer: igorstan +ms.reviewer: wiassaf ms.custom: seo-lt-2019, devx-track-azurepowershell --- diff --git a/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-reference-tsql-language-elements.md b/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-reference-tsql-language-elements.md index 648525eb0085..f187823bfba4 100644 --- a/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-reference-tsql-language-elements.md +++ b/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-reference-tsql-language-elements.md @@ -8,7 +8,7 @@ ms.topic: conceptual ms.subservice: sql-dw ms.date: 06/13/2018 ms.author: emtehran -ms.reviewer: igorstan +ms.reviewer: wiassaf ms.custom: seo-lt-2019, azure-synapse --- diff --git a/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-reference-tsql-statements.md b/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-reference-tsql-statements.md index 633939cd620b..e873fea9a980 100644 --- a/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-reference-tsql-statements.md +++ b/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-reference-tsql-statements.md @@ -8,7 +8,7 @@ ms.topic: conceptual ms.subservice: sql-dw ms.date: 05/01/2019 ms.author: wiassaf -ms.reviewer: igorstan +ms.reviewer: wiassaf ms.custom: seo-lt-2019, azure-synapse --- diff --git 
a/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-reference-tsql-system-views.md b/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-reference-tsql-system-views.md index b09117dfa116..504eb690e395 100644 --- a/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-reference-tsql-system-views.md +++ b/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-reference-tsql-system-views.md @@ -8,7 +8,7 @@ ms.topic: conceptual ms.subservice: sql-dw ms.date: 01/06/2020 ms.author: wiassaf -ms.reviewer: igorstan +ms.reviewer: wiassaf ms.custom: seo-lt-2019, azure-synapse --- diff --git a/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-restore-deleted-dw.md b/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-restore-deleted-dw.md index cff3030f388f..f842557dc635 100644 --- a/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-restore-deleted-dw.md +++ b/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-restore-deleted-dw.md @@ -1,14 +1,14 @@ --- title: Restore a deleted dedicated SQL pool (formerly SQL DW) description: How to guide for restoring a deleted dedicated SQL pool in Azure Synapse Analytics. -author: anumjs +author: joannapea manager: craigg ms.service: synapse-analytics ms.topic: conceptual ms.subservice: sql-dw ms.date: 08/29/2018 ms.author: joanpo -ms.reviewer: igorstan +ms.reviewer: wiassaf ms.custom: seo-lt-2019, devx-track-azurepowershell --- diff --git a/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-restore-from-geo-backup.md b/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-restore-from-geo-backup.md index 1b86a99d7377..a5e911c94064 100644 --- a/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-restore-from-geo-backup.md +++ b/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-restore-from-geo-backup.md @@ -1,14 +1,14 @@ --- title: Restore a dedicated SQL pool from a geo-backup description: How-to guide for geo-restoring a dedicated SQL pool in Azure Synapse Analytics -author: anumjs +author: joannapea manager: craigg ms.service: synapse-analytics ms.topic: conceptual ms.subservice: sql-dw ms.date: 11/13/2020 ms.author: joanpo -ms.reviewer: igorstan +ms.reviewer: wiassaf ms.custom: seo-lt-2019, devx-track-azurepowershell --- diff --git a/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-restore-points.md b/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-restore-points.md index d11d1973233c..7f91aaf2f3ce 100644 --- a/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-restore-points.md +++ b/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-restore-points.md @@ -8,7 +8,7 @@ ms.topic: conceptual ms.subservice: sql-dw ms.date: 07/03/2019 ms.author: wiassaf -ms.reviewer: igorstan +ms.reviewer: wiassaf ms.custom: seo-lt-2019, devx-track-azurepowershell --- diff --git a/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-service-capacity-limits.md b/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-service-capacity-limits.md index 97e46fb008b3..ee2f0da2eb35 100644 --- a/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-service-capacity-limits.md +++ b/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-service-capacity-limits.md @@ -8,7 +8,7 @@ ms.topic: conceptual ms.subservice: sql-dw ms.date: 2/19/2020 ms.author: martinle -ms.reviewer: igorstan +ms.reviewer: wiassaf ms.custom: azure-synapse --- 
diff --git a/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-source-control-integration.md b/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-source-control-integration.md index cd3154806c1b..4dcaac2606d3 100644 --- a/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-source-control-integration.md +++ b/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-source-control-integration.md @@ -8,7 +8,7 @@ ms.topic: overview ms.subservice: sql-dw ms.date: 08/23/2019 ms.author: wiassaf -ms.reviewer: igorstan +ms.reviewer: wiassaf --- # Source Control Integration for dedicated SQL pool in Azure Synapse Analytics diff --git a/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-table-constraints.md b/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-table-constraints.md index 53749d007e26..6257e7cc54d2 100644 --- a/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-table-constraints.md +++ b/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-table-constraints.md @@ -8,7 +8,7 @@ ms.topic: conceptual ms.subservice: sql-dw ms.date: 09/05/2019 ms.author: emtehran -ms.reviewer: nibruno; jrasnick +ms.reviewer: nibruno; wiassaf ms.custom: seo-lt-2019, azure-synapse --- diff --git a/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-tables-identity.md b/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-tables-identity.md index 04745a4203c5..ffcdf2b9256a 100644 --- a/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-tables-identity.md +++ b/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-tables-identity.md @@ -8,7 +8,7 @@ ms.topic: conceptual ms.subservice: sql-dw ms.date: 07/20/2020 ms.author: emtehran -ms.reviewer: igorstan +ms.reviewer: wiassaf ms.custom: seo-lt-2019, azure-synapse --- diff --git a/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-tables-statistics.md b/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-tables-statistics.md index b152f95df350..cba2424f3804 100644 --- a/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-tables-statistics.md +++ b/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-tables-statistics.md @@ -1,14 +1,13 @@ --- title: Create and update statistics on tables description: Recommendations and examples for creating and updating query-optimization statistics on tables in dedicated SQL pool. 
-author: XiaoyuMSFT -manager: craigg ms.service: synapse-analytics ms.topic: conceptual ms.subservice: sql-dw ms.date: 05/09/2018 -ms.author: xiaoyul -ms.reviewer: igorstan +author: mstehrani +ms.author: emtehran +ms.reviewer: wiassaf ms.custom: seo-lt-2019, azure-synapse --- @@ -105,39 +104,39 @@ actualRowCounts.logical_table_name, statsRowCounts.stats_row_count, actualRowCounts.actual_row_count, row_count_difference = CASE - WHEN actualRowCounts.actual_row_count >= statsRowCounts.stats_row_count THEN actualRowCounts.actual_row_count - statsRowCounts.stats_row_count - ELSE statsRowCounts.stats_row_count - actualRowCounts.actual_row_count + WHEN actualRowCounts.actual_row_count >= statsRowCounts.stats_row_count THEN actualRowCounts.actual_row_count - statsRowCounts.stats_row_count + ELSE statsRowCounts.stats_row_count - actualRowCounts.actual_row_count END, percent_deviation_from_actual = CASE - WHEN actualRowCounts.actual_row_count = 0 THEN statsRowCounts.stats_row_count - WHEN statsRowCounts.stats_row_count = 0 THEN actualRowCounts.actual_row_count - WHEN actualRowCounts.actual_row_count >= statsRowCounts.stats_row_count THEN CONVERT(NUMERIC(18, 0), CONVERT(NUMERIC(18, 2), (actualRowCounts.actual_row_count - statsRowCounts.stats_row_count)) / CONVERT(NUMERIC(18, 2), actualRowCounts.actual_row_count) * 100) - ELSE CONVERT(NUMERIC(18, 0), CONVERT(NUMERIC(18, 2), (statsRowCounts.stats_row_count - actualRowCounts.actual_row_count)) / CONVERT(NUMERIC(18, 2), actualRowCounts.actual_row_count) * 100) + WHEN actualRowCounts.actual_row_count = 0 THEN statsRowCounts.stats_row_count + WHEN statsRowCounts.stats_row_count = 0 THEN actualRowCounts.actual_row_count + WHEN actualRowCounts.actual_row_count >= statsRowCounts.stats_row_count THEN CONVERT(NUMERIC(18, 0), CONVERT(NUMERIC(18, 2), (actualRowCounts.actual_row_count - statsRowCounts.stats_row_count)) / CONVERT(NUMERIC(18, 2), actualRowCounts.actual_row_count) * 100) + ELSE CONVERT(NUMERIC(18, 0), CONVERT(NUMERIC(18, 2), (statsRowCounts.stats_row_count - actualRowCounts.actual_row_count)) / CONVERT(NUMERIC(18, 2), actualRowCounts.actual_row_count) * 100) END from ( - select distinct object_id from sys.stats where stats_id > 1 + select distinct object_id from sys.stats where stats_id > 1 ) objIdsWithStats left join ( - select object_id, sum(rows) as stats_row_count from sys.partitions group by object_id + select object_id, sum(rows) as stats_row_count from sys.partitions group by object_id ) statsRowCounts on objIdsWithStats.object_id = statsRowCounts.object_id left join ( - SELECT sm.name [schema] , - tb.name logical_table_name , - tb.object_id object_id , - SUM(rg.row_count) actual_row_count - FROM sys.schemas sm - INNER JOIN sys.tables tb ON sm.schema_id = tb.schema_id - INNER JOIN sys.pdw_table_mappings mp ON tb.object_id = mp.object_id - INNER JOIN sys.pdw_nodes_tables nt ON nt.name = mp.physical_name - INNER JOIN sys.dm_pdw_nodes_db_partition_stats rg ON rg.object_id = nt.object_id - AND rg.pdw_node_id = nt.pdw_node_id - AND rg.distribution_id = nt.distribution_id - WHERE rg.index_id = 1 - GROUP BY sm.name, tb.name, tb.object_id + SELECT sm.name [schema] , + tb.name logical_table_name , + tb.object_id object_id , + SUM(rg.row_count) actual_row_count + FROM sys.schemas sm + INNER JOIN sys.tables tb ON sm.schema_id = tb.schema_id + INNER JOIN sys.pdw_table_mappings mp ON tb.object_id = mp.object_id + INNER JOIN sys.pdw_nodes_tables nt ON nt.name = mp.physical_name + INNER JOIN sys.dm_pdw_nodes_db_partition_stats rg ON rg.object_id = 
nt.object_id + AND rg.pdw_node_id = nt.pdw_node_id + AND rg.distribution_id = nt.distribution_id + WHERE rg.index_id = 1 + GROUP BY sm.name, tb.name, tb.object_id ) actualRowCounts on objIdsWithStats.object_id = actualRowCounts.object_id diff --git a/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-troubleshoot-connectivity.md b/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-troubleshoot-connectivity.md index 96e126d00093..2cbeee8f46c1 100644 --- a/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-troubleshoot-connectivity.md +++ b/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-troubleshoot-connectivity.md @@ -8,7 +8,7 @@ ms.topic: conceptual ms.subservice: sql-dw ms.date: 03/27/2019 ms.author: wiassaf -ms.reviewer: igorstan +ms.reviewer: wiassaf ms.custom: "seo-lt-2019, azure-synapse, devx-track-csharp" --- diff --git a/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-videos.md b/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-videos.md index 40a2f02ee360..6df0b0c8f445 100644 --- a/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-videos.md +++ b/articles/synapse-analytics/sql-data-warehouse/sql-data-warehouse-videos.md @@ -8,7 +8,7 @@ ms.topic: conceptual ms.subservice: sql-dw ms.date: 02/15/2019 ms.author: wiassaf -ms.reviewer: igorstan +ms.reviewer: wiassaf ms.custom: seo-lt-2019 --- diff --git a/articles/synapse-analytics/sql-data-warehouse/striim-quickstart.md b/articles/synapse-analytics/sql-data-warehouse/striim-quickstart.md index 2f90d5ec6e0b..644f7db1c562 100644 --- a/articles/synapse-analytics/sql-data-warehouse/striim-quickstart.md +++ b/articles/synapse-analytics/sql-data-warehouse/striim-quickstart.md @@ -8,7 +8,7 @@ ms.topic: conceptual ms.subservice: sql-dw ms.date: 10/12/2018 ms.author: martinle -ms.reviewer: igorstan +ms.reviewer: wiassaf ms.custom: seo-lt-2019 --- diff --git a/articles/synapse-analytics/sql-data-warehouse/what-is-a-data-warehouse-unit-dwu-cdwu.md b/articles/synapse-analytics/sql-data-warehouse/what-is-a-data-warehouse-unit-dwu-cdwu.md index 7d60b618456f..bebc9c4d5a9e 100644 --- a/articles/synapse-analytics/sql-data-warehouse/what-is-a-data-warehouse-unit-dwu-cdwu.md +++ b/articles/synapse-analytics/sql-data-warehouse/what-is-a-data-warehouse-unit-dwu-cdwu.md @@ -8,7 +8,7 @@ ms.topic: conceptual ms.subservice: sql-dw ms.date: 11/22/2019 ms.author: martinle -ms.reviewer: igorstan +ms.reviewer: wiassaf ms.custom: seo-lt-2019, devx-track-azurepowershell --- diff --git a/articles/synapse-analytics/sql/data-loading-best-practices.md b/articles/synapse-analytics/sql/data-loading-best-practices.md index 63c7279a74c4..2cdc2eaea755 100644 --- a/articles/synapse-analytics/sql/data-loading-best-practices.md +++ b/articles/synapse-analytics/sql/data-loading-best-practices.md @@ -8,7 +8,7 @@ ms.topic: conceptual ms.subservice: sql ms.date: 08/26/2021 ms.author: joanpo -ms.reviewer: igorstan +ms.reviewer: wiassaf ms.custom: azure-synapse --- diff --git a/articles/synapse-analytics/sql/develop-materialized-view-performance-tuning.md b/articles/synapse-analytics/sql/develop-materialized-view-performance-tuning.md index 44a91f9db68d..21620b975e01 100644 --- a/articles/synapse-analytics/sql/develop-materialized-view-performance-tuning.md +++ b/articles/synapse-analytics/sql/develop-materialized-view-performance-tuning.md @@ -8,7 +8,7 @@ ms.topic: conceptual ms.subservice: sql ms.date: 04/15/2020 ms.author: xiaoyul -ms.reviewer: nibruno; jrasnick +ms.reviewer: 
nibruno; wiassaf --- # Performance tuning with materialized views using dedicated SQL pool in Azure Synapse Analytics diff --git a/articles/synapse-analytics/sql/develop-storage-files-storage-access-control.md b/articles/synapse-analytics/sql/develop-storage-files-storage-access-control.md index 192510c1cfea..99add2eab33d 100644 --- a/articles/synapse-analytics/sql/develop-storage-files-storage-access-control.md +++ b/articles/synapse-analytics/sql/develop-storage-files-storage-access-control.md @@ -114,7 +114,7 @@ You can use the following combinations of authorization and Azure Storage types: ## Firewall protected storage -You can configure storage accounts to allow access to specific serverless SQL pool by creating a [resource instance rule](../../storage/common/storage-network-security.md?tabs=azure-portal#grant-access-from-azure-resource-instances-preview). +You can configure storage accounts to allow access to specific serverless SQL pool by creating a [resource instance rule](../../storage/common/storage-network-security.md?tabs=azure-portal#grant-access-from-azure-resource-instances). When accessing storage that is protected with the firewall, you can use **User Identity** or **Managed Identity**. > [!NOTE] diff --git a/articles/synapse-analytics/sql/develop-stored-procedures.md b/articles/synapse-analytics/sql/develop-stored-procedures.md index 9ea6f6172ee6..eaac9607138b 100644 --- a/articles/synapse-analytics/sql/develop-stored-procedures.md +++ b/articles/synapse-analytics/sql/develop-stored-procedures.md @@ -8,7 +8,7 @@ ms.topic: conceptual ms.subservice: sql ms.date: 11/03/2020 ms.author: emtehran -ms.reviewer: igorstan +ms.reviewer: wiassaf --- # Stored procedures using Synapse SQL in Azure Synapse Analytics diff --git a/articles/synapse-analytics/sql/develop-tables-external-tables.md b/articles/synapse-analytics/sql/develop-tables-external-tables.md index a07ef9bb6a70..cd304dfda201 100644 --- a/articles/synapse-analytics/sql/develop-tables-external-tables.md +++ b/articles/synapse-analytics/sql/develop-tables-external-tables.md @@ -182,7 +182,7 @@ CREATE EXTERNAL DATA SOURCE SqlOnDemandDemo WITH ( ); ``` > [!NOTE] -> The SQL users needs to have proper permissions on database scoped credentials to access the data source in Azure Synapse Analytics Serverless SQL Pool. [Access external storage using serverless SQL pool in Azure Synapse Analytics](https://docs.microsoft.com/azure/synapse-analytics/sql/develop-storage-files-overview?tabs=impersonation#permissions). +> The SQL users needs to have proper permissions on database scoped credentials to access the data source in Azure Synapse Analytics Serverless SQL Pool. [Access external storage using serverless SQL pool in Azure Synapse Analytics](./develop-storage-files-overview.md?tabs=impersonation#permissions). The following example creates an external data source for Azure Data Lake Gen2 pointing to the publicly available New York data set: @@ -485,4 +485,4 @@ The external table is now created, for future exploration of the content of this ## Next steps -See the [CETAS](develop-tables-cetas.md) article for how to save query results to an external table in Azure Storage. Or you can start querying [Apache Spark for Azure Synapse external tables](develop-storage-files-spark-tables.md). +See the [CETAS](develop-tables-cetas.md) article for how to save query results to an external table in Azure Storage. Or you can start querying [Apache Spark for Azure Synapse external tables](develop-storage-files-spark-tables.md). 
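For the firewall-protected storage scenario above (allowing a specific Synapse workspace's serverless SQL pool through a storage firewall with a resource instance rule), the following is a minimal PowerShell sketch rather than a definitive procedure. The resource group, account, and workspace names are placeholders, and the `-TenantId`/`-ResourceId` parameters are assumed to be available in your Az.Storage version.

```azurepowershell-interactive
# Sketch only: names and IDs below are illustrative placeholders, not values from this article.
$storageRg   = "myStorageRg"
$storageName = "mystorageaccount"
# Resource ID of the Synapse workspace whose serverless SQL pool needs access (placeholder).
$workspaceId = "/subscriptions/<subscription-id>/resourceGroups/<rg>/providers/Microsoft.Synapse/workspaces/<workspace-name>"
$tenantId    = (Get-AzContext).Tenant.Id

# Adds a resource instance rule so the workspace can reach the firewalled storage account.
Add-AzStorageAccountNetworkRule -ResourceGroupName $storageRg -Name $storageName `
    -TenantId $tenantId -ResourceId $workspaceId
```

The same rule can also be created in the portal under the storage account's networking settings; the PowerShell form is shown here only because it is easy to script per environment.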
\ No newline at end of file diff --git a/articles/synapse-analytics/sql/develop-transaction-best-practices.md b/articles/synapse-analytics/sql/develop-transaction-best-practices.md index 142b973f90da..d95220588858 100644 --- a/articles/synapse-analytics/sql/develop-transaction-best-practices.md +++ b/articles/synapse-analytics/sql/develop-transaction-best-practices.md @@ -8,7 +8,7 @@ ms.topic: conceptual ms.subservice: sql ms.date: 04/15/2020 ms.author: emtehran -ms.reviewer: igorstan +ms.reviewer: wiassaf --- # Optimize transactions with dedicated SQL pool in Azure Synapse Analytics diff --git a/articles/synapse-analytics/sql/develop-transactions.md b/articles/synapse-analytics/sql/develop-transactions.md index 28a208df16d8..b7ec4aa5779c 100644 --- a/articles/synapse-analytics/sql/develop-transactions.md +++ b/articles/synapse-analytics/sql/develop-transactions.md @@ -1,14 +1,13 @@ --- -title: Use transactions +title: Use transactions with dedicated SQL pool in Azure Synapse Analytics description: Tips for implementing transactions with dedicated SQL pool in Azure Synapse Analytics for developing solutions. -author: XiaoyuMSFT -manager: craigg ms.service: synapse-analytics ms.topic: conceptual ms.subservice: sql ms.date: 04/15/2020 -ms.author: xiaoyul -ms.reviewer: igorstan +author: KevinConanMSFT +ms.author: kecona +ms.reviewer: wiassaf --- # Use transactions with dedicated SQL pool in Azure Synapse Analytics diff --git a/articles/synapse-analytics/sql/load-data-overview.md b/articles/synapse-analytics/sql/load-data-overview.md index a50209b41004..153791bd944d 100644 --- a/articles/synapse-analytics/sql/load-data-overview.md +++ b/articles/synapse-analytics/sql/load-data-overview.md @@ -8,7 +8,7 @@ ms.topic: conceptual ms.subservice: sql ms.date: 04/15/2020 ms.author: joanpo -ms.reviewer: igorstan +ms.reviewer: wiassaf --- # Design a PolyBase data loading strategy for dedicated SQL pool in Azure Synapse Analytics diff --git a/articles/synapse-analytics/sql/overview-architecture.md b/articles/synapse-analytics/sql/overview-architecture.md index fe2e03d81781..6238bd617674 100644 --- a/articles/synapse-analytics/sql/overview-architecture.md +++ b/articles/synapse-analytics/sql/overview-architecture.md @@ -8,7 +8,7 @@ ms.topic: conceptual ms.subservice: sql ms.date: 04/15/2020 ms.author: martinle -ms.reviewer: igorstan +ms.reviewer: wiassaf --- # Azure Synapse SQL architecture diff --git a/articles/synapse-analytics/sql/query-folders-multiple-csv-files.md b/articles/synapse-analytics/sql/query-folders-multiple-csv-files.md index 950b1c5b130f..73d37980a4d7 100644 --- a/articles/synapse-analytics/sql/query-folders-multiple-csv-files.md +++ b/articles/synapse-analytics/sql/query-folders-multiple-csv-files.md @@ -207,7 +207,7 @@ Since you have only one folder that matches the criteria, the query result is th ## Traverse folders recursively -Serverless SQL pool can recursively traverse folders if you specify /** at the end of path. The following query will read all files from all folders and subfolders located in the *csv* folder. +Serverless SQL pool can recursively traverse folders if you specify /** at the end of path. The following query will read all files from all folders and subfolders located in the *csv/taxi* folder. 
```sql SELECT diff --git a/articles/synapse-analytics/sql/resources-self-help-sql-on-demand.md b/articles/synapse-analytics/sql/resources-self-help-sql-on-demand.md index e847bf423215..41e554027eb3 100644 --- a/articles/synapse-analytics/sql/resources-self-help-sql-on-demand.md +++ b/articles/synapse-analytics/sql/resources-self-help-sql-on-demand.md @@ -643,7 +643,7 @@ Confirm the storage account accessed is using the Archive access tier. The Archive access tier is an offline tier. While a blob is in the Archive access tier, it can't be read or modified. -To read or download a blob in the Archive tier, rehydrate it to an online tier. See [Archive access tier](/azure/storage/blobs/access-tiers-overview#archive-access-tier). +To read or download a blob in the Archive tier, rehydrate it to an online tier. See [Archive access tier](../../storage/blobs/access-tiers-overview.md#archive-access-tier). ### [0x80070057](#tab/x80070057) @@ -1070,4 +1070,4 @@ If you have [partitioned files](query-specific-files.md), make sure you use [par ### Copy and transform data (CETAS) -Learn how to [store query results to storage](create-external-table-as-select.md) by using the CETAS command. +Learn how to [store query results to storage](create-external-table-as-select.md) by using the CETAS command. \ No newline at end of file diff --git a/articles/synapse-analytics/synapse-link/faq.yml b/articles/synapse-analytics/synapse-link/faq.yml index f09829daf609..aaaf3d2fa9f6 100644 --- a/articles/synapse-analytics/synapse-link/faq.yml +++ b/articles/synapse-analytics/synapse-link/faq.yml @@ -59,5 +59,5 @@ sections: - question: | How should I select the structure type of my destination table in the Synapse dedicated SQL pool? answer: | - You can refer to (Indexing tables - Azure Synapse Analytics | Microsoft Docs)[../sql-data-warehouse/sql-data-warehouse-tables-index.md] to understand the three options for table structure type. When clustered columnstore index is chosen, data type with max length (eg. VARCHAR(MAX)) is not supported. + You can refer to [Indexing tables - Azure Synapse Analytics | Microsoft Docs](../sql-data-warehouse/sql-data-warehouse-tables-index.md) to understand the three options for table structure type. When clustered columnstore index is chosen, data type with max length (eg. VARCHAR(MAX)) is not supported. 
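To make the rehydration guidance above concrete, here is a hedged PowerShell sketch that rehydrates an archived blob by copying it to an online tier. The account, container, and blob names are placeholders, and the `-StandardBlobTier`/`-RehydratePriority` parameters are assumed to be present in your Az.Storage version.

```azurepowershell-interactive
# Sketch only: all names are placeholders; adjust to your storage account.
$ctx = New-AzStorageContext -StorageAccountName "mystorageaccount" -UseConnectedAccount

# Copy the archived blob to a new blob in the Hot tier; the copy becomes readable once rehydration completes.
Start-AzStorageBlobCopy -SrcContainer "archive-data" -SrcBlob "file.parquet" `
    -DestContainer "hot-data" -DestBlob "file.parquet" `
    -StandardBlobTier Hot -RehydratePriority Standard -Context $ctx

# Optionally poll the copy state until rehydration finishes.
Get-AzStorageBlobCopyState -Container "hot-data" -Blob "file.parquet" -Context $ctx
```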
diff --git a/articles/synapse-analytics/toc.yml b/articles/synapse-analytics/toc.yml index f08adfe7e51f..efea8fb93d6d 100644 --- a/articles/synapse-analytics/toc.yml +++ b/articles/synapse-analytics/toc.yml @@ -1512,8 +1512,42 @@ items: items: - name: Migration guides items: + - name: All migration guides + href: migration-guides/index.yml - name: Migrate to a dedicated SQL pool href: migration-guides/migrate-to-synapse-analytics-guide.md + - name: From Teradata + items: + - name: 1 Design performance for Teradata migration + href: migration-guides/teradata/1-design-performance-migration.md + - name: 2 ETL and load migration considerations + href: migration-guides/teradata/2-etl-load-migration-considerations.md + - name: 3 Security access operations + href: migration-guides/teradata/3-security-access-operations.md + - name: 4 Visualization and reporting + href: migration-guides/teradata/4-visualization-reporting.md + - name: 5 Minimizing SQL issues + href: migration-guides/teradata/5-minimize-sql-issues.md + - name: 6 Microsoft and third-party tools + href: migration-guides/teradata/6-microsoft-third-party-migration-tools.md + - name: 7 Implementing modern data warehouses + href: migration-guides/teradata/7-beyond-data-warehouse-migration.md + - name: From Netezza + items: + - name: 1 Design performance for Netezza migration + href: migration-guides/netezza/1-design-performance-migration.md + - name: 2 ETL and load migration considerations + href: migration-guides/netezza/2-etl-load-migration-considerations.md + - name: 3 Security access operations + href: migration-guides/netezza/3-security-access-operations.md + - name: 4 Visualization and reporting + href: migration-guides/netezza/4-visualization-reporting.md + - name: 5 Minimizing SQL issues + href: migration-guides/netezza/5-minimize-sql-issues.md + - name: 6 Microsoft and third-party tools + href: migration-guides/netezza/6-microsoft-third-party-migration-tools.md + - name: 7 Implementing modern data warehouses + href: migration-guides/netezza/7-beyond-data-warehouse-migration.md - name: Security white paper items: - name: Introduction diff --git a/articles/virtual-desktop/configure-vm-gpu.md b/articles/virtual-desktop/configure-vm-gpu.md index 1f569aa0e87d..ce043d0a879d 100644 --- a/articles/virtual-desktop/configure-vm-gpu.md +++ b/articles/virtual-desktop/configure-vm-gpu.md @@ -1,10 +1,10 @@ --- title: Configure GPU for Azure Virtual Desktop - Azure description: How to enable GPU-accelerated rendering and encoding in Azure Virtual Desktop. -author: gundarev +author: femila ms.topic: how-to ms.date: 05/06/2019 -ms.author: denisgun +ms.author: femila --- # Configure graphics processing unit (GPU) acceleration for Azure Virtual Desktop diff --git a/articles/virtual-desktop/create-profile-container-azure-ad.md b/articles/virtual-desktop/create-profile-container-azure-ad.md index be12b3ecc956..c7511f8c1183 100644 --- a/articles/virtual-desktop/create-profile-container-azure-ad.md +++ b/articles/virtual-desktop/create-profile-container-azure-ad.md @@ -350,7 +350,9 @@ This section will show you how to configure a VM with FSLogix. You'll need to fo To configure FSLogix: -1. [Update or install FSLogix](/fslogix/install-ht) on your session host, if needed. +1. [Update or install FSLogix](/fslogix/install-ht) on your session host, if needed. + > [!NOTE] + > If the session host is created using the Azure Virtual Desktop service, FSLogix should already be pre-installed. 2. 
Follow the instructions in [Configure profile container registry settings](/fslogix/configure-profile-container-tutorial#configure-profile-container-registry-settings) to create the **Enabled** and **VHDLocations** registry values. Set the value of **VHDLocations** to `\\.file.core.windows.net\`. diff --git a/articles/virtual-desktop/faq.yml b/articles/virtual-desktop/faq.yml index 2f30e18009e7..f3d045642c6a 100644 --- a/articles/virtual-desktop/faq.yml +++ b/articles/virtual-desktop/faq.yml @@ -100,7 +100,7 @@ sections: - question: | How does Azure Virtual Desktop handle backups? - answer: There are multiple options in Azure Virtual Desktop for handling backup. At the Compute level, backup is recommended only for Personal Host Pools through [Azure Backup](https://docs.microsoft.com/azure/backup/backup-azure-vms-introduction). At the Storage level, recomemmended backup solution varies based on the backend storage used to store user profiles. If Azure Files Share is used, [Azure Backup for File Share](https://docs.microsoft.com/azure/backup/azure-file-share-backup-overview) is recommended. If Azure NetApp Files is used, [Snaphots/Policies](https://docs.microsoft.com/azure/azure-netapp-files/snapshots-manage-policy) or [Azure NetApp Files Backup](https://docs.microsoft.com/en-us/azure/azure-netapp-files/backup-introductionhttps://docs.microsoft.com/azure/azure-netapp-files/backup-introduction) are tools available. + answer: There are multiple options in Azure Virtual Desktop for handling backup. At the Compute level, backup is recommended only for Personal Host Pools through [Azure Backup](../backup/backup-azure-vms-introduction.md). At the Storage level, recomemmended backup solution varies based on the backend storage used to store user profiles. If Azure Files Share is used, [Azure Backup for File Share](../backup/azure-file-share-backup-overview.md) is recommended. If Azure NetApp Files is used, [Snaphots/Policies](../azure-netapp-files/snapshots-manage-policy.md) or [Azure NetApp Files Backup](https://docs.microsoft.com/en-us/azure/azure-netapp-files/backup-introductionhttps://docs.microsoft.com/azure/azure-netapp-files/backup-introduction) are tools available. - question: | Does Azure Virtual Desktop support third-party collaboration apps? @@ -180,4 +180,4 @@ sections: - question: | When I'm testing migration, can I have the two different Azure Virtual Desktop environments exist in the same tenant? answer: | - Yes. You can have both deployments within the same Azure Active Directory tenant. + Yes. You can have both deployments within the same Azure Active Directory tenant. 
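For the FSLogix step above (creating the **Enabled** and **VHDLocations** registry values on the session host), a minimal PowerShell sketch run on the session host might look like the following. The storage account and share names are placeholders you must replace with your own values.

```azurepowershell-interactive
# Run on the session host. Placeholders: replace <storage-account> and <share-name> with your values.
$regPath = "HKLM:\SOFTWARE\FSLogix\Profiles"
New-Item -Path $regPath -Force | Out-Null

# Enable FSLogix profile containers.
New-ItemProperty -Path $regPath -Name "Enabled" -PropertyType DWORD -Value 1 -Force | Out-Null

# Point profile containers at the Azure Files share (placeholder UNC path).
New-ItemProperty -Path $regPath -Name "VHDLocations" -PropertyType MultiString `
    -Value "\\<storage-account>.file.core.windows.net\<share-name>" -Force | Out-Null
```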
\ No newline at end of file diff --git a/articles/virtual-desktop/network-connectivity.md b/articles/virtual-desktop/network-connectivity.md index b6dd2f63d2c1..97af3e3d7f5b 100644 --- a/articles/virtual-desktop/network-connectivity.md +++ b/articles/virtual-desktop/network-connectivity.md @@ -2,10 +2,10 @@ title: Understanding Azure Virtual Desktop network connectivity titleSuffix: Azure description: Learn about Azure Virtual Desktop network connectivity -author: gundarev +author: femila ms.topic: conceptual ms.date: 11/16/2020 -ms.author: denisgun +ms.author: femila --- # Understanding Azure Virtual Desktop network connectivity diff --git a/articles/virtual-desktop/rdp-bandwidth.md b/articles/virtual-desktop/rdp-bandwidth.md index add738b6ebab..35e0433a2bfd 100644 --- a/articles/virtual-desktop/rdp-bandwidth.md +++ b/articles/virtual-desktop/rdp-bandwidth.md @@ -2,10 +2,10 @@ title: Remote Desktop Protocol bandwidth requirements Azure Virtual Desktop - Azure titleSuffix: Azure description: Understanding RDP bandwidth requirements for Azure Virtual Desktop. -author: gundarev +author: femila ms.topic: conceptual ms.date: 11/16/2020 -ms.author: denisgun +ms.author: femila --- # Remote Desktop Protocol (RDP) bandwidth requirements diff --git a/articles/virtual-desktop/rdp-quality-of-service-qos.md b/articles/virtual-desktop/rdp-quality-of-service-qos.md index 02a1502d1bfb..b5d7bd91bb4d 100644 --- a/articles/virtual-desktop/rdp-quality-of-service-qos.md +++ b/articles/virtual-desktop/rdp-quality-of-service-qos.md @@ -2,10 +2,10 @@ title: Implement Quality of Service (QoS) for Azure Virtual Desktop titleSuffix: Azure description: How to set up QoS for Azure Virtual Desktop. -author: gundarev +author: femila ms.topic: conceptual ms.date: 10/18/2021 -ms.author: denisgun +ms.author: femila --- # Implement Quality of Service (QoS) for Azure Virtual Desktop diff --git a/articles/virtual-desktop/safe-url-list.md b/articles/virtual-desktop/safe-url-list.md index e7624eed728c..a4b1cc9c6e1d 100644 --- a/articles/virtual-desktop/safe-url-list.md +++ b/articles/virtual-desktop/safe-url-list.md @@ -3,7 +3,7 @@ title: Azure Virtual Desktop required URL list - Azure description: A list of URLs you must unblock to ensure your Azure Virtual Desktop deployment works as intended. author: Heidilohr ms.topic: conceptual -ms.date: 05/12/2022 +ms.date: 05/26/2022 ms.author: helohr manager: femila --- @@ -79,6 +79,8 @@ The Azure virtual machines you create for Azure Virtual Desktop must have access |wvdportalstorageblob.blob.core.windows.net|443|Azure portal support|AzureCloud| | 169.254.169.254 | 80 | [Azure Instance Metadata service endpoint](../virtual-machines/windows/instance-metadata-service.md) | N/A | | 168.63.129.16 | 80 | [Session host health monitoring](../virtual-network/network-security-groups-overview.md#azure-platform-considerations) | N/A | +| oneocsp.microsoft.com | 443 | Certificates | N/A | +| microsoft.com | 443 | Certificates | N/A | A [Service Tag](../virtual-network/service-tags-overview.md) represents a group of IP address prefixes from a given Azure service. Microsoft manages the address prefixes encompassed by the service tag and automatically updates the service tag as addresses change, minimizing the complexity of frequent updates to network security rules. Service Tags can be used in both Network Security Group ([NSG](../virtual-network/network-security-groups-overview.md)) and [Azure Firewall](../firewall/service-tags.md) rules to restrict outbound network access. 
Service Tags can be also used in User Defined Route ([UDR](../virtual-network/virtual-networks-udr-overview.md#user-defined)) to customize traffic routing behavior. @@ -112,6 +114,7 @@ The Azure virtual machines you create for Azure Virtual Desktop must have access |wvdportalstorageblob.blob.core.usgovcloudapi.net|443|Azure portal support|AzureCloud| | 169.254.169.254 | 80 | [Azure Instance Metadata service endpoint](../virtual-machines/windows/instance-metadata-service.md) | N/A | | 168.63.129.16 | 80 | [Session host health monitoring](../virtual-network/network-security-groups-overview.md#azure-platform-considerations) | N/A | +| ocsp.msocsp.com | 443 | Certificates | N/A | > [!IMPORTANT] > We are currently transitioning the URLs we use for Agent traffic. We still support the URLs below, however we encourage you to switch to ***.prod.warm.ingest.monitor.core.usgovcloudapi.net** as soon as possible. diff --git a/articles/virtual-desktop/screen-capture-protection.md b/articles/virtual-desktop/screen-capture-protection.md index 5e7e2710ad89..bce6d5cc8af4 100644 --- a/articles/virtual-desktop/screen-capture-protection.md +++ b/articles/virtual-desktop/screen-capture-protection.md @@ -2,10 +2,10 @@ title: Azure Virtual Desktop screen capture protection titleSuffix: Azure description: How to set up screen capture protection for Azure Virtual Desktop. -author: gundarev +author: femila ms.topic: conceptual ms.date: 08/30/2021 -ms.author: denisgun +ms.author: femila ms.service: virtual-desktop --- diff --git a/articles/virtual-desktop/shortpath-public.md b/articles/virtual-desktop/shortpath-public.md index b06fb5f85631..0e05df80a8bf 100644 --- a/articles/virtual-desktop/shortpath-public.md +++ b/articles/virtual-desktop/shortpath-public.md @@ -2,10 +2,10 @@ title: Azure Virtual Desktop RDP Shortpath for public networks (preview) - Azure titleSuffix: Azure description: How to set up RDP Shortpath for public networks for Azure Virtual Desktop (preview). -author: gundarev +author: femila ms.topic: conceptual ms.date: 04/13/2022 -ms.author: denisgun +ms.author: femila --- # Azure Virtual Desktop RDP Shortpath for public networks (preview) diff --git a/articles/virtual-desktop/shortpath.md b/articles/virtual-desktop/shortpath.md index d9030aff4620..29ec79d65743 100644 --- a/articles/virtual-desktop/shortpath.md +++ b/articles/virtual-desktop/shortpath.md @@ -2,10 +2,10 @@ title: Azure Virtual Desktop RDP Shortpath for managed networks titleSuffix: Azure description: How to set up RDP Shortpath for managed networks for Azure Virtual Desktop. -author: gundarev +author: femila ms.topic: conceptual ms.date: 03/08/2022 -ms.author: denisgun +ms.author: femila --- # Azure Virtual Desktop RDP Shortpath for managed networks diff --git a/articles/virtual-desktop/virtual-desktop-fall-2019/configure-vm-gpu-2019.md b/articles/virtual-desktop/virtual-desktop-fall-2019/configure-vm-gpu-2019.md index 0fc5a9016e9d..bec6d2c1765d 100644 --- a/articles/virtual-desktop/virtual-desktop-fall-2019/configure-vm-gpu-2019.md +++ b/articles/virtual-desktop/virtual-desktop-fall-2019/configure-vm-gpu-2019.md @@ -1,10 +1,10 @@ --- title: Configure GPU for Azure Virtual Desktop (classic) - Azure description: How to enable GPU-accelerated rendering and encoding in Azure Virtual Desktop (classic). 
-author: gundarev +author: femila ms.topic: how-to ms.date: 03/30/2020 -ms.author: denisgun +ms.author: femila --- # Configure graphics processing unit (GPU) acceleration for Azure Virtual Desktop (classic) diff --git a/articles/virtual-machines/TOC.yml b/articles/virtual-machines/TOC.yml index 4d905ea175f3..fa99662a058e 100644 --- a/articles/virtual-machines/TOC.yml +++ b/articles/virtual-machines/TOC.yml @@ -1290,6 +1290,8 @@ href: ephemeral-os-disks.md - name: Create a VM using ephemeral OS disks href: ephemeral-os-disks-deploy.md + - name: FAQ on ephemeral OS disks + href: ephemeral-os-disks-faq.md - name: Securely import/export a disk items: - name: Configure private links for disks - CLI diff --git a/articles/virtual-machines/eav4-easv4-series.md b/articles/virtual-machines/eav4-easv4-series.md index 2ca6a7ab7eed..7ce0468a6c07 100644 --- a/articles/virtual-machines/eav4-easv4-series.md +++ b/articles/virtual-machines/eav4-easv4-series.md @@ -56,7 +56,7 @@ Eav4-series sizes are based on the 2.35Ghz AMD EPYCTM 7452 processor [VM Generation Support](generation-2.md): Generations 1 and 2
    [Accelerated Networking](../virtual-network/create-vm-accelerated-networking-cli.md): Supported
    [Ephemeral OS Disks](ephemeral-os-disks.md): Supported
    -[Nested Virtualization](/virtualization/hyper-v-on-windows/user-guide/nested-virtualization): Supported
    +[Nested Virtualization](/virtualization/hyper-v-on-windows/user-guide/nested-virtualization): Not Supported

    Easv4-series sizes are based on the 2.35Ghz AMD EPYCTM 7452 processor that can achieve a boosted maximum frequency of 3.35GHz and use premium SSD. The Easv4-series sizes are ideal for memory-intensive enterprise applications. diff --git a/articles/virtual-machines/ephemeral-os-disks-faq.md b/articles/virtual-machines/ephemeral-os-disks-faq.md new file mode 100644 index 000000000000..e53e7885aa21 --- /dev/null +++ b/articles/virtual-machines/ephemeral-os-disks-faq.md @@ -0,0 +1,71 @@ +--- +title: FAQ Ephemeral OS disks +description: Frequently asked questions on ephemeral OS disks for Azure VMs. +author: Aarthi-Vijayaraghavan +ms.service: virtual-machines +ms.workload: infrastructure-services +ms.topic: how-to +ms.date: 05/26/2022 +ms.author: aarthiv +ms.subservice: disks +ms.custom: devx-track-azurepowershell, devx-track-azurecli +--- + +# Frequently asked questions about Ephemeral OS disks + +**Q: What is the size of the local OS Disks?** + +A: We support platform, Shared Image Gallery, and custom images, up to the VM cache size with OS cache placement and up to Temp disk size with Temp disk placement, where all read/writes to the OS disk will be local on the same node as the Virtual Machine. + +**Q: Can the ephemeral OS disk be resized?** + +A: No, once the ephemeral OS disk is provisioned, the OS disk cannot be resized. + +**Q: Can the ephemeral OS disk placement be modified after creation of VM?** + +A: No, once the ephemeral OS disk is provisioned, the OS disk placement cannot be changed. But the VM can be recreated via ARM template deployment/PowerShell/CLI by updating the OS disk placement of choosing. This would result in the recreation of the VM with Data on the OS disk deleted and OS is reprovisioned. + +**Q: Is there any Temp disk created if image size equals to Temp disk size of VM size selected?** + +A: No, in that case, there won't be any Temp disk drive created. + +**Q: Are Ephemeral OS disks supported on low-priority VMs and Spot VMs?** + +A: Yes. There is no option of Stop-Deallocate for Ephemeral VMs, rather users need to Delete instead of deallocating them. + +**Q: Can I attach a Managed Disks to an Ephemeral VM?** + +A: Yes, you can attach a managed data disk to a VM that uses an ephemeral OS disk. + +**Q: Will all VM sizes be supported for ephemeral OS disks?** + +A: No, most Premium Storage VM sizes are supported (DS, ES, FS, GS, M, etc.). To know whether a particular VM size supports ephemeral OS disks, you can: + +Call `Get-AzComputeResourceSku` PowerShell cmdlet +```azurepowershell-interactive + +$vmSizes=Get-AzComputeResourceSku | where{$_.ResourceType -eq 'virtualMachines' -and $_.Locations.Contains('CentralUSEUAP')} + +foreach($vmSize in $vmSizes) +{ + foreach($capability in $vmSize.capabilities) + { + if($capability.Name -eq 'EphemeralOSDiskSupported' -and $capability.Value -eq 'true') + { + $vmSize + } + } +} +``` + +**Q: Can the ephemeral OS disk be applied to existing VMs and scale sets?** + +A: No, ephemeral OS disk can only be used during VM and scale set creation. + +**Q: Can you mix ephemeral and normal OS disks in a scale set?** + +A: No, you can't have a mix of ephemeral and persistent OS disk instances within the same scale set. + +**Q: Can the ephemeral OS disk be created using PowerShell or CLI?** + +A: Yes, you can create VMs with Ephemeral OS Disk using REST, Templates, PowerShell, and CLI. 
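To illustrate that last answer, the following PowerShell sketch requests an ephemeral OS disk with temp disk placement while building a VM configuration. Resource names, credentials, and the image reference are placeholders, and the `-DiffDiskPlacement` parameter is assumed to be available in your Az.Compute version.

```azurepowershell-interactive
# Sketch only: resource names, credentials, and the NIC are illustrative placeholders.
$nic = Get-AzNetworkInterface -ResourceGroupName "myResourceGroup" -Name "myNic"

$vm = New-AzVMConfig -VMName "myEphemeralVm" -VMSize "Standard_DS4_v2"
$vm = Set-AzVMOperatingSystem -VM $vm -Windows -ComputerName "myEphemeralVm" -Credential (Get-Credential)
$vm = Set-AzVMSourceImage -VM $vm -PublisherName "MicrosoftWindowsServer" -Offer "WindowsServer" `
        -Skus "2019-Datacenter-smalldisk" -Version "latest"

# DiffDiskSetting 'Local' requests an ephemeral OS disk; caching must be ReadOnly.
# DiffDiskPlacement selects where the disk lives ('CacheDisk' or 'ResourceDisk').
$vm = Set-AzVMOSDisk -VM $vm -CreateOption FromImage -Caching ReadOnly `
        -DiffDiskSetting Local -DiffDiskPlacement ResourceDisk

$vm = Add-AzVMNetworkInterface -VM $vm -Id $nic.Id
New-AzVM -ResourceGroupName "myResourceGroup" -Location "EastUS" -VM $vm
```

The smalldisk image and the Standard_DS4_v2 size are chosen here only because the image fits within that size's temp disk; verify the fit for your own image and size.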
\ No newline at end of file diff --git a/articles/virtual-machines/ephemeral-os-disks.md b/articles/virtual-machines/ephemeral-os-disks.md index 027acd5c622b..d9479774b3ca 100644 --- a/articles/virtual-machines/ephemeral-os-disks.md +++ b/articles/virtual-machines/ephemeral-os-disks.md @@ -42,7 +42,12 @@ Key differences between persistent and ephemeral OS disks: | **Redeploy** | OS disk data is preserved | Data on the OS disk is deleted, OS is reprovisioned | | **Stop/ Start of VM** | OS disk data is preserved | Not Supported | | **Page file placement**| For Windows, page file is stored on the resource disk| For Windows, page file is stored on the OS disk (for both OS cache placement and Temp disk placement).| +| **Maintenance of VM/VMSS using [healing](understand-vm-reboots.md#unexpected-downtime)** | OS disk data is preserved | OS disk data is not preserved | +| **Maintenance of VM/VMSS using [Live Migration](maintenance-and-updates.md#live-migration)** | OS disk data is preserved | OS disk data is preserved | +## Placement options for Ephemeral OS disks +Ephemeral OS disk can be stored either on VM's OS cache disk or VM's temp/resource disk. +[DiffDiskPlacement](/rest/api/compute/virtualmachines/list#diffdiskplacement) is the new property that can be used to specify where you want to place the Ephemeral OS disk. With this feature, when a Windows VM is provisioned, we configure the pagefile to be located on the OS Disk. ## Size requirements @@ -55,13 +60,11 @@ If you want to opt for **Temp disk placement**: Standard Ubuntu server image fro > [!Important] > If opting for temp disk placement the Final Temp disk size = (Initial temp disk size - OS image size). +In the case of **Temp disk placement** as Ephemeral OS disk is placed on temp disk it will share the IOPS with temp disk as per the VM size chosen by you. + Basic Linux and Windows Server images in the Marketplace that are denoted by `[smallsize]` tend to be around 30 GiB and can use most of the available VM sizes. Ephemeral disks also require that the VM size supports **Premium storage**. The sizes usually (but not always) have an `s` in the name, like DSv2 and EsV3. For more information, see [Azure VM sizes](sizes.md) for details around which sizes support Premium storage. -## Placement options for Ephemeral OS disks -Ephemeral OS disk can be stored either on VM's OS cache disk or VM's temp/resource disk. -[DiffDiskPlacement](/rest/api/compute/virtualmachines/list#diffdiskplacement) is the new property that can be used to specify where you want to place the Ephemeral OS disk. -With this feature, when a Windows VM is provisioned, we configure the pagefile to be located on the OS Disk. ## Unsupported features - Capturing VM images @@ -80,71 +83,12 @@ For example, If you try to create a Trusted launch Ephemeral OS disk VM using OS This is because the temp storage for [Standard_DS4_v2](dv2-dsv2-series.md) is 56 GiB, and 1 GiB is reserved for VMGS when using trusted launch. For the same example above if you create a standard Ephemeral OS disk VM you would not get any errors and it would be a successful operation. -> [!NOTE] +> [!Important] > > While using ephemeral disks for Trusted Launch VMs, keys and secrets generated or sealed by the vTPM after VM creation may not be persisted for operations like reimaging and platform events like service healing. 
> For more information on [how to deploy a trusted launch VM](trusted-launch-portal.md) -## Frequently asked questions - -**Q: What is the size of the local OS Disks?** - -A: We support platform, Shared Image Gallery, and custom images, up to the VM cache size with OS cache placement and up to Temp disk size with Temp disk placement, where all read/writes to the OS disk will be local on the same node as the Virtual Machine. - -**Q: Can the ephemeral OS disk be resized?** - -A: No, once the ephemeral OS disk is provisioned, the OS disk cannot be resized. - -**Q: Can the ephemeral OS disk placement be modified after creation of VM?** - -A: No, once the ephemeral OS disk is provisioned, the OS disk placement cannot be changed. But the VM can be recreated via ARM template deployment/PowerShell/CLI by updating the OS disk placement of choosing. This would result in the recreation of the VM with Data on the OS disk deleted and OS is reprovisioned. - -**Q: Is there any Temp disk created if image size equals to Temp disk size of VM size selected?** - -A: No, in that case, there won't be any Temp disk drive created. - -**Q: Are Ephemeral OS disks supported on low-priority VMs and Spot VMs?** - -A: Yes. There is no option of Stop-Deallocate for Ephemeral VMs, rather users need to Delete instead of deallocating them. - -**Q: Can I attach a Managed Disks to an Ephemeral VM?** - -A: Yes, you can attach a managed data disk to a VM that uses an ephemeral OS disk. - -**Q: Will all VM sizes be supported for ephemeral OS disks?** - -A: No, most Premium Storage VM sizes are supported (DS, ES, FS, GS, M, etc.). To know whether a particular VM size supports ephemeral OS disks, you can: - -Call `Get-AzComputeResourceSku` PowerShell cmdlet -```azurepowershell-interactive - -$vmSizes=Get-AzComputeResourceSku | where{$_.ResourceType -eq 'virtualMachines' -and $_.Locations.Contains('CentralUSEUAP')} - -foreach($vmSize in $vmSizes) -{ - foreach($capability in $vmSize.capabilities) - { - if($capability.Name -eq 'EphemeralOSDiskSupported' -and $capability.Value -eq 'true') - { - $vmSize - } - } -} -``` - -**Q: Can the ephemeral OS disk be applied to existing VMs and scale sets?** - -A: No, ephemeral OS disk can only be used during VM and scale set creation. - -**Q: Can you mix ephemeral and normal OS disks in a scale set?** - -A: No, you can't have a mix of ephemeral and persistent OS disk instances within the same scale set. - -**Q: Can the ephemeral OS disk be created using PowerShell or CLI?** - -A: Yes, you can create VMs with Ephemeral OS Disk using REST, Templates, PowerShell, and CLI. - > [!NOTE] > > Ephemeral disk will not be accessible through the portal. You will receive a "Resource not Found" or "404" error when accessing the ephemeral disk which is expected. @@ -152,3 +96,4 @@ A: Yes, you can create VMs with Ephemeral OS Disk using REST, Templates, PowerSh ## Next steps Create a VM with ephemeral OS disk using [Azure Portal/CLI/Powershell/ARM template](ephemeral-os-disks-deploy.md). +Check out the [frequently asked questions on ephemeral os disk](ephemeral-os-disks-faq.md). 
diff --git a/articles/virtual-machines/extensions/agent-dependency-linux.md b/articles/virtual-machines/extensions/agent-dependency-linux.md index 3a5fa68e6804..d23c3233e450 100644 --- a/articles/virtual-machines/extensions/agent-dependency-linux.md +++ b/articles/virtual-machines/extensions/agent-dependency-linux.md @@ -157,7 +157,7 @@ az vm extension list --resource-group myResourceGroup --vm-name myVM -o table Extension execution output is logged to the following file: ``` -/opt/microsoft/dependency-agent/log/install.log +/var/opt/microsoft/dependency-agent/log/install.log ``` ### Support diff --git a/articles/virtual-machines/generalize.md b/articles/virtual-machines/generalize.md index b6fb370cfed3..830e207cc295 100644 --- a/articles/virtual-machines/generalize.md +++ b/articles/virtual-machines/generalize.md @@ -87,17 +87,15 @@ To generalize your Windows VM, follow these steps: 2. Open a Command Prompt window as an administrator. -3. Delete the panther directory (C:\Windows\Panther). Then change the directory to %windir%\system32\sysprep, and then run `sysprep.exe`. - -4. In the **System Preparation Tool** dialog box, select **Enter System Out-of-Box Experience (OOBE)** and select the **Generalize** check box. - -5. For **Shutdown Options**, select **Shutdown**. - -6. Select **OK**. - - :::image type="content" source="windows/media/upload-generalized-managed/sysprepgeneral.png" alt-text="![Start Sysprep](./media/upload-generalized-managed/sysprepgeneral.png)"::: +3. Delete the panther directory (C:\Windows\Panther). + +5. Then change the directory to %windir%\system32\sysprep, and then run: + ``` + sysprep /generalize /shutdown /mode:vm + ``` +6. The VM will shut down when Sysprep is finished generalizing the VM. Do not restart the VM. + -6. When Sysprep completes, it shuts down the VM. Do not restart the VM. > [!TIP] > **Optional** Use [DISM](/windows-hardware/manufacture/desktop/dism-optimize-image-command-line-options) to optimize your image and reduce your VM's first boot time. diff --git a/articles/virtual-machines/image-version.md b/articles/virtual-machines/image-version.md index 264777b0f849..a4c83ecdf0ca 100644 --- a/articles/virtual-machines/image-version.md +++ b/articles/virtual-machines/image-version.md @@ -17,7 +17,7 @@ ms.custom: # Create an image definition and an image version -A [Azure Compute Gallery](shared-image-galleries.md) (formerly known as Shared Image Gallery)simplifies custom image sharing across your organization. Custom images are like marketplace images, but you create them yourself. Images can be created from a VM, VHD, snapshot, managed image, or another image version. +A [Azure Compute Gallery](shared-image-galleries.md) (formerly known as Shared Image Gallery) simplifies custom image sharing across your organization. Custom images are like marketplace images, but you create them yourself. Images can be created from a VM, VHD, snapshot, managed image, or another image version. The Azure Compute Gallery lets you share your custom VM images with others in your organization, within or across regions, within an Azure AD tenant, or publicly using a [community gallery (preview)](azure-compute-gallery.md#community). Choose which images you want to share, which regions you want to make them available in, and who you want to share them with. You can create multiple galleries so that you can logically group images. 
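As a rough sketch of the gallery workflow that the Azure Compute Gallery paragraph above describes, the following PowerShell creates a gallery and an image definition. All names, the publisher/offer/SKU values, and the location are placeholders rather than values from this article.

```azurepowershell-interactive
# Sketch only: names and location are placeholders.
$rg = "myGalleryRg"
New-AzGallery -ResourceGroupName $rg -Name "myGallery" -Location "EastUS"

# An image definition groups image versions that share the same OS type, OS state, and identity.
New-AzGalleryImageDefinition -ResourceGroupName $rg -GalleryName "myGallery" `
    -Name "myImageDefinition" -Location "EastUS" `
    -OsState Generalized -OsType Windows `
    -Publisher "myPublisher" -Offer "myOffer" -Sku "mySku"
```

Image versions are then published into the definition and replicated to the regions where VMs will be created from them.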
diff --git a/articles/virtual-machines/linux/create-upload-generic.md b/articles/virtual-machines/linux/create-upload-generic.md index 21401ec3f746..7be8609ddbe5 100644 --- a/articles/virtual-machines/linux/create-upload-generic.md +++ b/articles/virtual-machines/linux/create-upload-generic.md @@ -274,7 +274,8 @@ The [Azure Linux Agent](../extensions/agent-linux.md) `waagent` provisions a Lin If you want to mount, format and create swap you can either: 1. Pass this in as a cloud-init config every time you create a VM through `customdata`. This is the recommended method. 2. Use a cloud-init directive baked into the image that will do this every time the VM is created. - ``` + + ``` echo 'DefaultEnvironment="CLOUD_CFG=/etc/cloud/cloud.cfg.d/00-azure-swap.cfg"' >> /etc/systemd/system.conf cat > /etc/cloud/cloud.cfg.d/00-azure-swap.cfg << EOF #cloud-config @@ -293,7 +294,8 @@ The [Azure Linux Agent](../extensions/agent-linux.md) `waagent` provisions a Lin - ["ephemeral0.1", "/mnt"] - ["ephemeral0.2", "none", "swap", "sw,nofail,x-systemd.requires=cloud-init.service,x-systemd.device-timeout=2", "0", "0"] EOF - ``` + + ``` 1. Deprovision. > [!CAUTION] diff --git a/articles/virtual-machines/linux/n-series-driver-setup.md b/articles/virtual-machines/linux/n-series-driver-setup.md index 680c73d06548..dac9a95e973f 100644 --- a/articles/virtual-machines/linux/n-series-driver-setup.md +++ b/articles/virtual-machines/linux/n-series-driver-setup.md @@ -4,7 +4,7 @@ description: How to set up NVIDIA GPU drivers for N-series VMs running Linux in services: virtual-machines author: vikancha-MSFT ms.service: virtual-machines -ms.subervice: vm-sizes-gpu +ms.subservice: vm-sizes-gpu ms.collection: linux ms.topic: how-to ms.workload: infrastructure-services diff --git a/articles/virtual-machines/linux/run-command.md b/articles/virtual-machines/linux/run-command.md index 2f277f4ffed5..0bbcb3ec45b5 100644 --- a/articles/virtual-machines/linux/run-command.md +++ b/articles/virtual-machines/linux/run-command.md @@ -20,7 +20,7 @@ The Run Command feature uses the virtual machine (VM) agent to run shell scripts ## Benefits -You can access your virtual machines in multiple ways. Run Command can run scripts on your virtual machines remotely by using the VM agent. You use Run Command through the Azure portal, [REST API](/rest/api/compute/virtual-machines-run-commands/run-command), or [Azure CLI](/cli/azure/vm/run-command#az-vm-run-command-invoke) for Linux VMs. +You can access your virtual machines in multiple ways. Run Command can run scripts on your virtual machines remotely by using the VM agent. You use Run Command through the Azure portal, [REST API](/rest/api/compute/virtual-machine-run-commands), or [Azure CLI](/cli/azure/vm/run-command#az-vm-run-command-invoke) for Linux VMs. This capability is useful in all scenarios where you want to run a script within a virtual machine. It's one of the only ways to troubleshoot and remediate a virtual machine that doesn't have the RDP or SSH port open because of network or administrative user configuration. diff --git a/articles/virtual-machines/linux/upload-vhd.md b/articles/virtual-machines/linux/upload-vhd.md index efa6bd22485b..ebf0e2d2623f 100644 --- a/articles/virtual-machines/linux/upload-vhd.md +++ b/articles/virtual-machines/linux/upload-vhd.md @@ -74,7 +74,7 @@ You can now upload VHD straight into a managed disk. For instructions, see [Uplo You can also create a customized VM in Azure and then copy the OS disk and attach it to a new VM to create another copy. 
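Returning to the Run Command capability described above, a minimal sketch with Azure PowerShell might look like this; the resource group, VM name, and script path are placeholders, and `Invoke-AzVMRunCommand` is shown only as the PowerShell counterpart of the portal, REST, and CLI options the article mentions.

```azurepowershell-interactive
# Sketch only: placeholders throughout. RunShellScript targets Linux VMs; use RunPowerShellScript for Windows.
Invoke-AzVMRunCommand -ResourceGroupName "myResourceGroup" -VMName "myLinuxVm" `
    -CommandId "RunShellScript" -ScriptPath "./check-disk.sh"
```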
This is fine for testing, but if you want to use an existing Azure VM as the model for multiple new VMs, create an *image* instead. For more information about creating an image from an existing Azure VM, see [Create a custom image of an Azure VM by using the CLI](tutorial-custom-images.md). -If you want to copy an existing VM to another region, you might want to use azcopy to [creat a copy of a disk in another region](disks-upload-vhd-to-managed-disk-cli.md#copy-a-managed-disk). +If you want to copy an existing VM to another region, you might want to use azcopy to [create a copy of a disk in another region](disks-upload-vhd-to-managed-disk-cli.md#copy-a-managed-disk). Otherwise, you should take a snapshot of the VM and then create a new OS VHD from the snapshot. diff --git a/articles/virtual-machines/maintenance-and-updates.md b/articles/virtual-machines/maintenance-and-updates.md index 111b5218da6a..87a824be16f8 100644 --- a/articles/virtual-machines/maintenance-and-updates.md +++ b/articles/virtual-machines/maintenance-and-updates.md @@ -32,7 +32,7 @@ Most nonzero-impact maintenance pauses the VM for less than 10 seconds. In certa Memory-preserving maintenance works for more than 90 percent of Azure VMs. It doesn't work for G, L, M, N, and H series. Azure increasingly uses live-migration technologies and improves memory-preserving maintenance mechanisms to reduce the pause durations. -These maintenance operations that don't require a reboot are applied one fault domain at a time. They stop if they receive any warning health signals from platform monitoring tools. +These maintenance operations that don't require a reboot are applied one fault domain at a time. They stop if they receive any warning health signals from platform monitoring tools. Maintenance operations that do not require a reboot may occur simultaneously in paired regions or Availability Zones. For a given change, the deployment are mostly sequenced across Availability Zones and across Region pairs, but there can be overlap at the tail. These types of updates can affect some applications. When the VM is live-migrated to a different host, some sensitive workloads might show a slight performance degradation in the few minutes leading up to the VM pause. To prepare for VM maintenance and reduce impact during Azure maintenance, try [using Scheduled Events for Windows](./windows/scheduled-events.md) or [Linux](./linux/scheduled-events.md) for such applications. @@ -85,7 +85,7 @@ Availability zones are unique physical locations within an Azure region. Each zo An availability zone is a combination of a fault domain and an update domain. If you create three or more VMs across three zones in an Azure region, your VMs are effectively distributed across three fault domains and three update domains. The Azure platform recognizes this distribution across update domains to make sure that VMs in different zones are not updated at the same time. -Each infrastructure update rolls out zone by zone, within a single region. But, you can have deployment going on in Zone 1, and different deployment going in Zone 2, at the same time. Deployments are not all serialized. But, a single deployment only rolls out one zone at a time to reduce risk. +Each infrastructure update rolls out zone by zone, within a single region. But, you can have deployment going on in Zone 1, and different deployment going in Zone 2, at the same time. Deployments are not all serialized. 
But, a single deployment that requires a reboot only rolls out one zone at a time to reduce risk. In general, updates that require a reboot are avoided when possible, and Azure attempts to use Live Migration or provide customers control. #### Virtual machine scale sets diff --git a/articles/virtual-machines/windows/faq.yml b/articles/virtual-machines/windows/faq.yml index 173b4d7fd675..e08932962e42 100644 --- a/articles/virtual-machines/windows/faq.yml +++ b/articles/virtual-machines/windows/faq.yml @@ -169,4 +169,4 @@ sections: - For additional information and restrictions for password creation reference this [password guidance documentation](https://docs.microsoft.com/windows/security/threat-protection/security-policy-settings/password-must-meet-complexity-requirements#reference). + For additional information and restrictions for password creation reference this [password guidance documentation](/windows/security/threat-protection/security-policy-settings/password-must-meet-complexity-requirements#reference). \ No newline at end of file diff --git a/articles/virtual-machines/windows/image-builder-gallery-update-image-version.md b/articles/virtual-machines/windows/image-builder-gallery-update-image-version.md index dc01b7a5dbc3..09f9f8e39b0f 100644 --- a/articles/virtual-machines/windows/image-builder-gallery-update-image-version.md +++ b/articles/virtual-machines/windows/image-builder-gallery-update-image-version.md @@ -7,7 +7,7 @@ ms.reviewer: cynthn ms.date: 03/02/2021 ms.topic: how-to ms.service: virtual-machines -ms.subervice: image-builder +ms.subservice: image-builder ms.collection: windows --- # Create a new Windows VM image version from an existing image version using Azure Image Builder diff --git a/articles/virtual-machines/windows/image-builder-gallery.md b/articles/virtual-machines/windows/image-builder-gallery.md index eb59f28edac4..993a17643cb7 100644 --- a/articles/virtual-machines/windows/image-builder-gallery.md +++ b/articles/virtual-machines/windows/image-builder-gallery.md @@ -7,8 +7,8 @@ ms.reviewer: cynthn ms.date: 03/02/2021 ms.topic: how-to ms.service: virtual-machines -ms.subervice: image-builder -ms.colletion: windows +ms.subservice: image-builder +ms.collection: windows ms.custom: devx-track-azurepowershell --- # Create a Windows image and distribute it to an Azure Compute Gallery diff --git a/articles/virtual-machines/windows/image-builder-powershell.md b/articles/virtual-machines/windows/image-builder-powershell.md index d97d98202700..1491853e635a 100644 --- a/articles/virtual-machines/windows/image-builder-powershell.md +++ b/articles/virtual-machines/windows/image-builder-powershell.md @@ -7,8 +7,8 @@ ms.reviewer: cynthn ms.date: 03/02/2021 ms.topic: how-to ms.service: virtual-machines -ms.subervice: image-builder -ms.colletion: windows +ms.subservice: image-builder +ms.collection: windows ms.custom: devx-track-azurepowershell --- # Create a Windows VM with Azure Image Builder using PowerShell diff --git a/articles/virtual-machines/windows/image-builder-vnet.md b/articles/virtual-machines/windows/image-builder-vnet.md index 7d7b06a293e3..e3fe8dd18f12 100644 --- a/articles/virtual-machines/windows/image-builder-vnet.md +++ b/articles/virtual-machines/windows/image-builder-vnet.md @@ -7,8 +7,8 @@ ms.reviewer: cynthn ms.date: 03/02/2021 ms.topic: how-to ms.service: virtual-machines -ms.subervice: image-builder -ms.colletion: windows +ms.subservice: image-builder +ms.collection: windows ms.custom: devx-track-azurepowershell --- # Use Azure Image Builder for 
Windows VMs allowing access to an existing Azure VNET diff --git a/articles/virtual-machines/windows/image-builder.md b/articles/virtual-machines/windows/image-builder.md index 8d26b0a4f5f5..5767fa997ffd 100644 --- a/articles/virtual-machines/windows/image-builder.md +++ b/articles/virtual-machines/windows/image-builder.md @@ -7,8 +7,8 @@ ms.reviewer: cynthn ms.date: 04/23/2021 ms.topic: how-to ms.service: virtual-machines -ms.subervice: image-builder -ms.colletion: windows +ms.subservice: image-builder +ms.collection: windows --- # Create a Windows VM with Azure Image Builder diff --git a/articles/virtual-machines/windows/on-prem-to-azure.md b/articles/virtual-machines/windows/on-prem-to-azure.md index 49fa349a38bf..10bb4c0040d4 100644 --- a/articles/virtual-machines/windows/on-prem-to-azure.md +++ b/articles/virtual-machines/windows/on-prem-to-azure.md @@ -4,7 +4,7 @@ description: Create VMs in Azure using VHDs uploaded from other clouds like AWS author: roygara manager: twooley ms.service: storage -ms.subervice: disks +ms.subservice: disks ms.workload: infrastructure-services ms.tgt_pltfrm: vm-windows ms.topic: conceptual diff --git a/articles/virtual-machines/workloads/sap/high-availability-guide-rhel-pacemaker.md b/articles/virtual-machines/workloads/sap/high-availability-guide-rhel-pacemaker.md index 5fb1958a9b83..f9b20a60e496 100644 --- a/articles/virtual-machines/workloads/sap/high-availability-guide-rhel-pacemaker.md +++ b/articles/virtual-machines/workloads/sap/high-availability-guide-rhel-pacemaker.md @@ -13,7 +13,7 @@ ms.topic: article ms.tgt_pltfrm: vm-windows ms.workload: infrastructure-services ms.custom: subject-rbac-steps -ms.date: 12/07/2021 +ms.date: 05/26/2022 ms.author: radeltch --- @@ -241,15 +241,15 @@ The STONITH device uses a Service Principal to authorize against Microsoft Azure The Service Principal does not have permissions to access your Azure resources by default. You need to give the Service Principal permissions to start and stop (power-off) all virtual machines of the cluster. If you did not already create the custom role, you can create it using [PowerShell](../../../role-based-access-control/role-assignments-powershell.md) or [Azure CLI](../../../role-based-access-control/role-assignments-cli.md) -Use the following content for the input file. You need to adapt the content to your subscriptions that is, replace c276fc76-9cd4-44c9-99a7-4fd71546436e and e91d47c4-76f3-4271-a796-21b4ecfe3624 with the Ids of your subscription. If you only have one subscription, remove the second entry in AssignableScopes. +Use the following content for the input file. You need to adapt the content to your subscriptions. That is, replace *xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx* and *yyyyyyyy-yyyy-yyyy-yyyy-yyyyyyyyyyyy* with your own subscription IDs. If you only have one subscription, remove the second entry in AssignableScopes.
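To make the PowerShell path mentioned above concrete: assuming the role definition JSON shown just below is saved locally as `LinuxFenceAgentRole.json`, a minimal sketch of creating the custom role and assigning it to the fencing service principal could look like this. The file name, object ID, and scope are placeholders, not values from this PR.

```azurepowershell
# Sketch only: create the custom role from the JSON input file described in this section.
New-AzRoleDefinition -InputFile ".\LinuxFenceAgentRole.json"

# Assign the new role to the fencing service principal on each subscription listed in AssignableScopes.
New-AzRoleAssignment -ObjectId "<service-principal-object-id>" `
    -RoleDefinitionName "Linux Fence Agent Role" `
    -Scope "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx"
```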
```json { "Name": "Linux Fence Agent Role", "description": "Allows to power-off and start virtual machines", "assignableScopes": [ - "/subscriptions/e663cc2d-722b-4be1-b636-bbd9e4c60fd9", - "/subscriptions/e91d47c4-76f3-4271-a796-21b4ecfe3624" + "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx", + "/subscriptions/yyyyyyyy-yyyy-yyyy-yyyy-yyyyyyyyyyyy" ], "actions": [ "Microsoft.Compute/*/read", diff --git a/articles/virtual-machines/workloads/sap/high-availability-guide-suse-pacemaker.md b/articles/virtual-machines/workloads/sap/high-availability-guide-suse-pacemaker.md index 965fcfd57630..57df553db76b 100644 --- a/articles/virtual-machines/workloads/sap/high-availability-guide-suse-pacemaker.md +++ b/articles/virtual-machines/workloads/sap/high-availability-guide-suse-pacemaker.md @@ -13,7 +13,7 @@ ms.topic: article ms.tgt_pltfrm: vm-windows ms.workload: infrastructure-services ms.custom: subject-rbac-steps -ms.date: 04/26/2022 +ms.date: 05/26/2022 ms.author: radeltch --- @@ -485,15 +485,15 @@ This section applies only if you're using a STONITH device that's based on an Az By default, the service principal doesn't have permissions to access your Azure resources. You need to give the service principal permissions to start and stop (deallocate) all virtual machines in the cluster. If you didn't already create the custom role, you can do so by using [PowerShell](../../../role-based-access-control/custom-roles-powershell.md#create-a-custom-role) or the [Azure CLI](../../../role-based-access-control/custom-roles-cli.md). -Use the following content for the input file. You need to adapt the content to your subscriptions. That is, replace *c276fc76-9cd4-44c9-99a7-4fd71546436e* and *e91d47c4-76f3-4271-a796-21b4ecfe3624* with your own subscription IDs. If you have only one subscription, remove the second entry under AssignableScopes. +Use the following content for the input file. You need to adapt the content to your subscriptions. That is, replace *xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx* and *yyyyyyyy-yyyy-yyyy-yyyy-yyyyyyyyyyyy* with your own subscription IDs. If you have only one subscription, remove the second entry under AssignableScopes. ```json { "Name": "Linux fence agent Role", "description": "Allows to power-off and start virtual machines", "assignableScopes": [ - "/subscriptions/e663cc2d-722b-4be1-b636-bbd9e4c60fd9", - "/subscriptions/e91d47c4-76f3-4271-a796-21b4ecfe3624" + "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx", + "/subscriptions/yyyyyyyy-yyyy-yyyy-yyyy-yyyyyyyyyyyy" ], "actions": [ "Microsoft.Compute/*/read", diff --git a/articles/virtual-machines/workloads/sap/planning-guide.md b/articles/virtual-machines/workloads/sap/planning-guide.md index 51bd1c01aa8c..767be4ae34bd 100644 --- a/articles/virtual-machines/workloads/sap/planning-guide.md +++ b/articles/virtual-machines/workloads/sap/planning-guide.md @@ -634,7 +634,7 @@ For more documentation, see [this article][vpn-gateway-create-site-to-site-rm-po #### VNet to VNet Connection Using Multi-Site VPN, you need to configure a separate Azure Virtual Network in each of the regions. However often you have the requirement that the software components in the different regions should communicate with each other. Ideally this communication should not be routed from one Azure Region to on-premises and from there to the other Azure Region. To shortcut, Azure offers the possibility to configure a connection from one Azure Virtual Network in one region to another Azure Virtual Network hosted in another region. 
This functionality is called VNet-to-VNet connection. More details on this functionality can be found here: -[Configure a VNet-to-VNet VPN gateway connection by using the Azure portal](/azure/vpn-gateway/vpn-gateway-howto-vnet-vnet-resource-manager-portal). +[Configure a VNet-to-VNet VPN gateway connection by using the Azure portal](../../../vpn-gateway/vpn-gateway-howto-vnet-vnet-resource-manager-portal.md). #### Private Connection to Azure ExpressRoute @@ -644,18 +644,18 @@ Find more details on Azure ExpressRoute and offerings here: * [ExpressRoute documentation](https://azure.microsoft.com/documentation/services/expressroute/) * [Azure ExpressRoute pricing](https://azure.microsoft.com/pricing/details/expressroute/) -* [ExpressRoute FAQ](/azure/expressroute/expressroute-faqs) +* [ExpressRoute FAQ](../../../expressroute/expressroute-faqs.md) Express Route enables multiple Azure subscriptions through one ExpressRoute circuit as documented here -* [Tutorial: Connect a virtual network to an ExpressRoute circuit](/azure/expressroute/expressroute-howto-linkvnet-arm) -* [Quickstart: Create and modify an ExpressRoute circuit using Azure PowerShell](/azure/expressroute/expressroute-howto-circuit-arm) +* [Tutorial: Connect a virtual network to an ExpressRoute circuit](../../../expressroute/expressroute-howto-linkvnet-arm.md) +* [Quickstart: Create and modify an ExpressRoute circuit using Azure PowerShell](../../../expressroute/expressroute-howto-circuit-arm.md) #### Forced tunneling in case of cross-premises For VMs joining on-premises domains through site-to-site, point-to-site, or ExpressRoute, you need to make sure that the Internet proxy settings are getting deployed for all the users in those VMs as well. By default, software running in those VMs or users using a browser to access the internet would not go through the company proxy, but would connect straight through Azure to the internet. But even the proxy setting is not a 100% solution to direct the traffic through the company proxy since it is responsibility of software and services to check for the proxy. If software running in the VM is not doing that or an administrator manipulates the settings, traffic to the Internet can be detoured again directly through Azure to the Internet. In order to avoid such a direct internet connectivity, you can configure Forced Tunneling with site-to-site connectivity between on-premises and Azure. The detailed description of the Forced Tunneling feature is published here: -[Configure forced tunneling using the classic deployment model](/azure/vpn-gateway/vpn-gateway-about-forced-tunneling) +[Configure forced tunneling using the classic deployment model](../../../vpn-gateway/vpn-gateway-about-forced-tunneling.md) Forced Tunneling with ExpressRoute is enabled by customers advertising a default route via the ExpressRoute BGP peering sessions. @@ -1908,7 +1908,7 @@ High Availability and Disaster recovery functionality for DBMS in general as wel Here are two examples of a complete SAP NetWeaver HA architecture in Azure - one for Windows and one for Linux. -Unmanaged disks only: The concepts as explained below may need to be compromised a bit when you deploy many SAP systems and the number of VMs deployed are exceeding the maximum limit of Storage Accounts per subscription. In such cases, VHDs of VMs need to be combined within one Storage Account. Usually you would do so by combining VHDs of SAP application layer VMs of different SAP systems. 
We also combined different VHDs of different DBMS VMs of different SAP systems in one Azure Storage Account. Thereby keeping the IOPS limits of Azure Storage Accounts in mind [Scalability and performance targets for standard storage accounts](/azure/storage/common/scalability-targets-standard-account) +Unmanaged disks only: The concepts as explained below may need to be compromised a bit when you deploy many SAP systems and the number of VMs deployed are exceeding the maximum limit of Storage Accounts per subscription. In such cases, VHDs of VMs need to be combined within one Storage Account. Usually you would do so by combining VHDs of SAP application layer VMs of different SAP systems. We also combined different VHDs of different DBMS VMs of different SAP systems in one Azure Storage Account. Thereby keeping the IOPS limits of Azure Storage Accounts in mind [Scalability and performance targets for standard storage accounts](../../../storage/common/scalability-targets-standard-account.md) ##### ![Windows logo.][Logo_Windows] HA on Windows @@ -2037,4 +2037,4 @@ Read the articles: - [Azure Virtual Machines deployment for SAP NetWeaver](./deployment-guide.md) - [Considerations for Azure Virtual Machines DBMS deployment for SAP workload](./dbms_guide_general.md) -- [SAP HANA infrastructure configurations and operations on Azure](./hana-vm-operations.md) +- [SAP HANA infrastructure configurations and operations on Azure](./hana-vm-operations.md) \ No newline at end of file diff --git a/articles/virtual-machines/workloads/sap/sap-rise-integration.md b/articles/virtual-machines/workloads/sap/sap-rise-integration.md index ac6c725f7944..c04317c3739f 100644 --- a/articles/virtual-machines/workloads/sap/sap-rise-integration.md +++ b/articles/virtual-machines/workloads/sap/sap-rise-integration.md @@ -139,7 +139,7 @@ With the information about available interfaces to the SAP RISE/ECS landscape, s Integrating your SAP system with Azure cloud native services such as Azure Data Factory or Azure Synapse would use these communication channels to the SAP RISE/ECS managed environment. -The following high-level architecture shows possible integration scenario with Azure data services such as [Data Factory](/azure/data-factory) or [Synapse Analytics](/azure/synapse-analytics). For these Azure services either a self-hosted integration runtime (self-hosted IR or IR) or Azure integration runtime (Azure IR) can be used. The use of either integration runtime depends on the [chosen data connector](/azure/data-factory/copy-activity-overview#supported-data-stores-and-formats), most SAP connectors are only available for the self-hosted IR. [SAP ECC connector](/azure/data-factory/connector-sap-ecc?tabs=data-factory) is capable of being using through both Azure IR and self-hosted IR. The choice of IR governs the network path taken. SAP .NET connector is used for [SAP table connector](/azure/data-factory/connector-sap-ecc?tabs=data-factory), [SAP BW](/azure/data-factory/connector-sap-business-warehouse?tabs=data-factory) and [SAP OpenHub](/azure/data-factory/connector-sap-business-warehouse-open-hub) connectors alike. All these connectors use SAP function modules (FM) on the SAP system, executed through RFC connections. Last if direct database access has been agreed with SAP, along with users and connection path opened, ODBC/JDBC connector for [SAP HANA](/azure/data-factory/connector-sap-hana?tabs=data-factory) can be used from the self-hosted IR as well. 
+The following high-level architecture shows possible integration scenario with Azure data services such as [Data Factory](../../../data-factory/index.yml) or [Synapse Analytics](../../../synapse-analytics/index.yml). For these Azure services either a self-hosted integration runtime (self-hosted IR or IR) or Azure integration runtime (Azure IR) can be used. The use of either integration runtime depends on the [chosen data connector](../../../data-factory/copy-activity-overview.md#supported-data-stores-and-formats), most SAP connectors are only available for the self-hosted IR. [SAP ECC connector](../../../data-factory/connector-sap-ecc.md?tabs=data-factory) is capable of being using through both Azure IR and self-hosted IR. The choice of IR governs the network path taken. SAP .NET connector is used for [SAP table connector](../../../data-factory/connector-sap-ecc.md?tabs=data-factory), [SAP BW](../../../data-factory/connector-sap-business-warehouse.md?tabs=data-factory) and [SAP OpenHub](../../../data-factory/connector-sap-business-warehouse-open-hub.md) connectors alike. All these connectors use SAP function modules (FM) on the SAP system, executed through RFC connections. Last if direct database access has been agreed with SAP, along with users and connection path opened, ODBC/JDBC connector for [SAP HANA](../../../data-factory/connector-sap-hana.md?tabs=data-factory) can be used from the self-hosted IR as well. [![SAP RISE/ECS accessed by Azure ADF or Synapse.](./media/sap-rise-integration/sap-rise-adf-synapse.png)](./media/sap-rise-integration/sap-rise-adf-synapse.png#lightbox) @@ -155,13 +155,13 @@ The customer is responsible for deployment and operation of the self-hosted inte To learn the overall support on SAP data integration scenario, see [SAP data integration using Azure Data Factory whitepaper](https://github.com/Azure/Azure-DataFactory/blob/master/whitepaper/SAP%20Data%20Integration%20using%20Azure%20Data%20Factory.pdf) with detailed introduction on each SAP connector, comparison and guidance. ## On-premise data gateway -Further Azure Services such as [Logic Apps](/azure/logic-apps/logic-apps-using-sap-connector), [Power Apps](/connectors/saperp/) or [Power BI](/power-bi/connect-data/desktop-sap-bw-connector) communicate and exchange data with SAP systems through an on-premise data gateway. The on-premise data gateway is a virtual machine, running in Azure or on-premise. It provides secure data transfer between these Azure Services and your SAP systems. +Further Azure Services such as [Logic Apps](../../../logic-apps/logic-apps-using-sap-connector.md), [Power Apps](/connectors/saperp/) or [Power BI](/power-bi/connect-data/desktop-sap-bw-connector) communicate and exchange data with SAP systems through an on-premise data gateway. The on-premise data gateway is a virtual machine, running in Azure or on-premise. It provides secure data transfer between these Azure Services and your SAP systems. With SAP RISE, the on-premise data gateway can connect to Azure Services running in customer’s Azure subscription. This VM running the data gateway is deployed and operated by the customer. With below high-level architecture as overview, similar method can be used for either service. 
[![SAP RISE/ECS accessed from Azure on-premise data gateway and connected Azure services.](./media/sap-rise-integration/sap-rise-on-premises-data-gateway.png)](./media/sap-rise-integration/sap-rise-on-premises-data-gateway.png#lightbox) -The SAP RISE environment here provides access to the SAP ports for RFC and https described earlier. The communication ports are accessed by the private network address through the vnet peering or VPN site-to-site connection. The on-premise data gateway VM running in customer’s Azure subscription uses the [SAP .NET connector](https://support.sap.com/en/product/connectors/msnet.html) to run RFC, BAPI or IDoc calls through the RFC connection. Additionally, depending on service and way the communication is setup, a way to connect to public IP of the SAP systems REST API through https might be required. The https connection to a public IP can be exposed through SAP RISE/ECS managed application gateway. This high level architecture shows the possible integration scenario. Alternatives to it such as using Logic Apps single tenant and [private endpoints](/azure/logic-apps/secure-single-tenant-workflow-virtual-network-private-endpoint) to secure the communication and other can be seen as extension and are not described here in. +The SAP RISE environment here provides access to the SAP ports for RFC and https described earlier. The communication ports are accessed by the private network address through the vnet peering or VPN site-to-site connection. The on-premise data gateway VM running in customer’s Azure subscription uses the [SAP .NET connector](https://support.sap.com/en/product/connectors/msnet.html) to run RFC, BAPI or IDoc calls through the RFC connection. Additionally, depending on service and way the communication is setup, a way to connect to public IP of the SAP systems REST API through https might be required. The https connection to a public IP can be exposed through SAP RISE/ECS managed application gateway. This high level architecture shows the possible integration scenario. Alternatives to it such as using Logic Apps single tenant and [private endpoints](../../../logic-apps/secure-single-tenant-workflow-virtual-network-private-endpoint.md) to secure the communication and other can be seen as extension and are not described here in. SAP RISE/ECS exposes the communication ports for these applications to use but has no knowledge about any details of the connected application or service running in a customer’s subscription. @@ -170,7 +170,7 @@ SAP RISE/ECS exposes the communication ports for these applications to use but h ## Azure Monitoring for SAP with SAP RISE -[Azure Monitoring for SAP](/azure/virtual-machines/workloads/sap/monitor-sap-on-azure) is an Azure-native solution for monitoring your SAP system. It extends the Azure monitor platform monitoring capability with support to gather data about SAP NetWeaver, database, and operating system details. +[Azure Monitoring for SAP](./monitor-sap-on-azure.md) is an Azure-native solution for monitoring your SAP system. It extends the Azure monitor platform monitoring capability with support to gather data about SAP NetWeaver, database, and operating system details. > [!Note] > SAP RISE/ECS is a fully managed service for your SAP landscape and thus Azure Monitoring for SAP is not intended to be utilized for such managed environment. 
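Tying back to the self-hosted integration runtime that the customer deploys and operates for Data Factory or Synapse connectivity, a hedged Azure PowerShell sketch of registering such a runtime in an existing data factory is shown below. All names are placeholders, and the SAP-side connectivity still depends on the RISE/ECS communication channels described in this article.

```azurepowershell
# Sketch only: register a self-hosted integration runtime resource in an existing data factory.
# The IR software still has to be installed on the customer-operated VM and registered with the key.
Set-AzDataFactoryV2IntegrationRuntime -ResourceGroupName "myRg" -DataFactoryName "myDataFactory" `
    -Name "SapSelfHostedIR" -Type SelfHosted -Description "Self-hosted IR for SAP RISE connectivity"

# Retrieve the authentication key used when registering the IR software on the VM.
Get-AzDataFactoryV2IntegrationRuntimeKey -ResourceGroupName "myRg" -DataFactoryName "myDataFactory" -Name "SapSelfHostedIR"
```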
diff --git a/articles/virtual-network/nat-gateway/faq.yml b/articles/virtual-network/nat-gateway/faq.yml index b81d66095550..b2be167f4165 100644 --- a/articles/virtual-network/nat-gateway/faq.yml +++ b/articles/virtual-network/nat-gateway/faq.yml @@ -24,7 +24,7 @@ sections: - question: How can I use custom IP prefixes (BYOIP) with Virtual Network NAT gateway? answer: | - You can use public IP prefixes and addresses derived from custom IP prefixes (BYOIP) with your NAT gateway resource. See [Custom IP address prefix (BYOIP)](/azure/virtual-network/ip-services/custom-ip-address-prefix) to learn more. + You can use public IP prefixes and addresses derived from custom IP prefixes (BYOIP) with your NAT gateway resource. See [Custom IP address prefix (BYOIP)](../ip-services/custom-ip-address-prefix.md) to learn more. - question: Can a zone-redundant public IP address be attached to a NAT gateway? answer: | @@ -119,4 +119,4 @@ sections: additionalContent: | ## Next steps - If your question is not listed above, please send feedback about this page with your question. This will create a GitHub issue for the product team to ensure all of our valued customer questions are answered. + If your question is not listed above, please send feedback about this page with your question. This will create a GitHub issue for the product team to ensure all of our valued customer questions are answered. \ No newline at end of file diff --git a/articles/virtual-network/nat-gateway/media/tutorial-nat-gateway-load-balancer-internal-portal/find-public-ip.png b/articles/virtual-network/nat-gateway/media/tutorial-nat-gateway-load-balancer-internal-portal/find-public-ip.png index 515725d4bf49..83d25904169e 100644 Binary files a/articles/virtual-network/nat-gateway/media/tutorial-nat-gateway-load-balancer-internal-portal/find-public-ip.png and b/articles/virtual-network/nat-gateway/media/tutorial-nat-gateway-load-balancer-internal-portal/find-public-ip.png differ diff --git a/articles/virtual-network/nat-gateway/media/tutorial-nat-gateway-load-balancer-internal-portal/my-ip.png b/articles/virtual-network/nat-gateway/media/tutorial-nat-gateway-load-balancer-internal-portal/my-ip.png index b7070296fffb..c6549285c3a3 100644 Binary files a/articles/virtual-network/nat-gateway/media/tutorial-nat-gateway-load-balancer-internal-portal/my-ip.png and b/articles/virtual-network/nat-gateway/media/tutorial-nat-gateway-load-balancer-internal-portal/my-ip.png differ diff --git a/articles/virtual-network/nat-gateway/media/tutorial-nat-gateway-load-balancer-public-portal/find-public-ip.png b/articles/virtual-network/nat-gateway/media/tutorial-nat-gateway-load-balancer-public-portal/find-public-ip.png index 5a899e68afcd..37c1ae642329 100644 Binary files a/articles/virtual-network/nat-gateway/media/tutorial-nat-gateway-load-balancer-public-portal/find-public-ip.png and b/articles/virtual-network/nat-gateway/media/tutorial-nat-gateway-load-balancer-public-portal/find-public-ip.png differ diff --git a/articles/virtual-network/nat-gateway/media/tutorial-nat-gateway-load-balancer-public-portal/my-ip.png b/articles/virtual-network/nat-gateway/media/tutorial-nat-gateway-load-balancer-public-portal/my-ip.png index bf00ee57864d..33096a52e481 100644 Binary files a/articles/virtual-network/nat-gateway/media/tutorial-nat-gateway-load-balancer-public-portal/my-ip.png and b/articles/virtual-network/nat-gateway/media/tutorial-nat-gateway-load-balancer-public-portal/my-ip.png differ diff --git a/articles/virtual-network/nat-gateway/nat-overview.md 
b/articles/virtual-network/nat-gateway/nat-overview.md index 582980f90fa6..235f890f78c0 100644 --- a/articles/virtual-network/nat-gateway/nat-overview.md +++ b/articles/virtual-network/nat-gateway/nat-overview.md @@ -56,7 +56,7 @@ Virtual Network NAT is a software defined networking service. A NAT gateway won' * Public IP prefixes - * Public IP addresses and prefixes derived from custom IP prefixes (BYOIP), to learn more, see [Custom IP address prefix (BYOIP)](/azure/virtual-network/ip-services/custom-ip-address-prefix) + * Public IP addresses and prefixes derived from custom IP prefixes (BYOIP), to learn more, see [Custom IP address prefix (BYOIP)](../ip-services/custom-ip-address-prefix.md) * Virtual Network NAT is compatible with standard SKU public IP addresses or public IP prefix resources or a combination of both. You can use a public IP prefix directly or distribute the public IP addresses of the prefix across multiple NAT gateway resources. The NAT gateway will groom all traffic to the range of IP addresses of the prefix. @@ -98,4 +98,4 @@ For information on the SLA, see [SLA for Virtual Network NAT](https://azure.micr * Learn about the [NAT gateway resource](./nat-gateway-resource.md). -* [Learn module: Introduction to Azure Virtual Network NAT](/learn/modules/intro-to-azure-virtual-network-nat). +* [Learn module: Introduction to Azure Virtual Network NAT](/learn/modules/intro-to-azure-virtual-network-nat). \ No newline at end of file diff --git a/articles/virtual-network/nat-gateway/resource-health.md b/articles/virtual-network/nat-gateway/resource-health.md index a4bacf54a36e..221fae2dad2a 100644 --- a/articles/virtual-network/nat-gateway/resource-health.md +++ b/articles/virtual-network/nat-gateway/resource-health.md @@ -16,7 +16,7 @@ This article provides guidance on how to use Azure Resource Health to monitor an ## Resource health status -[Azure Resource Health](/azure/service-health/overview) provides information about the health of your NAT gateway resource. You can use resource health and Azure monitor notifications to keep you informed on the availability and health status of your NAT gateway resource. Resource health can help you quickly assess whether an issue is due to a problem in your Azure infrastructure or because of an Azure platform event. The resource health of your NAT gateway is evaluated by measuring the data-path availability of your NAT gateway endpoint. +[Azure Resource Health](../../service-health/overview.md) provides information about the health of your NAT gateway resource. You can use resource health and Azure monitor notifications to keep you informed on the availability and health status of your NAT gateway resource. Resource health can help you quickly assess whether an issue is due to a problem in your Azure infrastructure or because of an Azure platform event. The resource health of your NAT gateway is evaluated by measuring the data-path availability of your NAT gateway endpoint. You can view the status of your NAT gateway’s health status on the **Resource Health** page, found under **Support + troubleshooting** for your NAT gateway resource. @@ -29,7 +29,7 @@ The health of your NAT gateway resource is displayed as one of the following sta | Unavailable | Your NAT gateway resource is not healthy. The metric for the data-path availability has reported less than 25% for the past 15 minutes. You may experience unavailability of your NAT gateway resource for outbound connectivity. 
| | Unknown | Health status for your NAT gateway resource hasn’t been updated or hasn’t received information for data-path availability for more than 5 minutes. This state should be transient and will reflect the correct status as soon as data is received. | -For more information about Azure Resource Health, see [Resource Health overview](/azure/service-health/resource-health-overview). +For more information about Azure Resource Health, see [Resource Health overview](../../service-health/resource-health-overview.md). To view the health of your NAT gateway resource: @@ -41,7 +41,7 @@ To view the health of your NAT gateway resource: ## Next steps -- Learn about [Virtual Network NAT](/azure/virtual-network/nat-gateway/nat-overview) -- Learn about [metrics and alerts for NAT gateway](/azure/virtual-network/nat-gateway/nat-metrics) -- Learn about [troubleshooting NAT gateway resources](/azure/virtual-network/nat-gateway/troubleshoot-nat) -- Learn about [Azure resource health](/azure/service-health/resource-health-overview) +- Learn about [Virtual Network NAT](./nat-overview.md) +- Learn about [metrics and alerts for NAT gateway](./nat-metrics.md) +- Learn about [troubleshooting NAT gateway resources](./troubleshoot-nat.md) +- Learn about [Azure resource health](../../service-health/resource-health-overview.md) \ No newline at end of file diff --git a/articles/virtual-network/nat-gateway/tutorial-migrate-ilip-nat.md b/articles/virtual-network/nat-gateway/tutorial-migrate-ilip-nat.md index f419a50c2a96..3a26d1ed7f36 100644 --- a/articles/virtual-network/nat-gateway/tutorial-migrate-ilip-nat.md +++ b/articles/virtual-network/nat-gateway/tutorial-migrate-ilip-nat.md @@ -7,7 +7,7 @@ ms.author: allensu ms.service: virtual-network ms.subservice: nat ms.topic: tutorial -ms.date: 2/07/2022 +ms.date: 5/25/2022 ms.custom: template-tutorial --- @@ -83,7 +83,7 @@ In this section, you’ll create a NAT gateway with the IP address you previousl 2. In **NAT gateways**, select **+ Create**. -3. In **Create network address translation (NAT) gateway**, enter or select the following information. +3. In **Create network address translation (NAT) gateway**, enter or select the following information in the **Basics** tab. | Setting | Value | | ------- | ----- | diff --git a/articles/virtual-network/nat-gateway/tutorial-migrate-outbound-nat.md b/articles/virtual-network/nat-gateway/tutorial-migrate-outbound-nat.md index a4788d5938a6..bb29a2efbbe0 100644 --- a/articles/virtual-network/nat-gateway/tutorial-migrate-outbound-nat.md +++ b/articles/virtual-network/nat-gateway/tutorial-migrate-outbound-nat.md @@ -7,7 +7,7 @@ ms.author: allensu ms.service: virtual-network ms.subservice: nat ms.topic: tutorial -ms.date: 1/11/2022 +ms.date: 5/25/2022 ms.custom: template-tutorial --- @@ -33,7 +33,7 @@ In this tutorial, you learn how to: * The load balancer name used in the examples is **myLoadBalancer**. > [!NOTE] -> Virtual Network NAT provides outbound connectivity for standard internal load balancers. To configure create a NAT gateway resource and associate it to your subnet. For more information on integrating a NAT gateway with your internal load balancers, see [Tutorial: Integrate NAT gateway with an internal load balancer - Azure portal - Virtual Network NAT](tutorial-nat-gateway-load-balancer-internal-portal.md). +> Virtual Network NAT provides outbound connectivity for standard internal load balancers. 
For more information on integrating a NAT gateway with your internal load balancers, see [Tutorial: Integrate a NAT gateway with an internal load balancer using Azure portal](tutorial-nat-gateway-load-balancer-internal-portal.md). ## Migrate default outbound access @@ -45,7 +45,7 @@ In this section, you’ll learn how to change your outbound connectivity method 3. In **NAT gateways**, select **+ Create**. -4. In **Create network address translation (NAT) gateway**, enter or select the following information. +4. In **Create network address translation (NAT) gateway**, enter or select the following information in the **Basics** tab. | Setting | Value | | ------- | ----- | @@ -113,7 +113,7 @@ In this section, you’ll create a NAT gateway with the IP address previously us 2. In **NAT gateways**, select **+ Create**. -3. In **Create network address translation (NAT) gateway**, enter or select the following information. +3. In **Create network address translation (NAT) gateway**, enter or select the following information in the **Basics** tab. | Setting | Value | | ------- | ----- | diff --git a/articles/virtual-network/nat-gateway/tutorial-nat-gateway-load-balancer-internal-portal.md b/articles/virtual-network/nat-gateway/tutorial-nat-gateway-load-balancer-internal-portal.md index 3d0a4f118039..254371eed049 100644 --- a/articles/virtual-network/nat-gateway/tutorial-nat-gateway-load-balancer-internal-portal.md +++ b/articles/virtual-network/nat-gateway/tutorial-nat-gateway-load-balancer-internal-portal.md @@ -7,7 +7,7 @@ ms.author: allensu ms.service: virtual-network ms.subservice: nat ms.topic: tutorial -ms.date: 08/04/2021 +ms.date: 05/24/2022 ms.custom: template-tutorial --- @@ -73,7 +73,7 @@ In this section, you'll create a virtual network and subnet. 8. Select **Save**. -9. Select the **Security** tab. +9. Select the **Security** tab or select the **Next: Security** button at the bottom of the page. 10. Under **BastionHost**, select **Enable**. Enter this information: @@ -112,64 +112,66 @@ During the creation of the load balancer, you'll configure: | **Instance details** | | | Name | Enter **myLoadBalancer** | | Region | Select **(US) East US**. | - | Type | Select **Internal**. | | SKU | Leave the default **Standard**. | + | Type | Select **Internal**. | 4. Select **Next: Frontend IP configuration** at the bottom of the page. -5. In **Frontend IP configuration**, select **+ Add a frontend IP**. +5. In **Frontend IP configuration**, select **+ Add a frontend IP configuration**. 6. Enter **LoadBalancerFrontend** in **Name**. -7. Select **myBackendSubnet** in **Subnet**. +7. Select **myVNet** in **Virtual network**. + +8. Select **myBackendSubnet** in **Subnet**. -8. Select **Dynamic** for **Assignment**. +9. Select **Dynamic** for **Assignment**. -9. Select **Zone-redundant** in **Availability zone**. +10. Select **Zone-redundant** in **Availability zone**. > [!NOTE] > In regions with [Availability Zones](../../availability-zones/az-overview.md?toc=%2fazure%2fvirtual-network%2ftoc.json#availability-zones), you have the option to select no-zone (default option), a specific zone, or zone-redundant. The choice will depend on your specific domain failure requirements. In regions without Availability Zones, this field won't appear.
    For more information on availability zones, see [Availability zones overview](../../availability-zones/az-overview.md). -10. Select **Add**. +11. Select **Add**. -11. Select **Next: Backend pools** at the bottom of the page. +12. Select **Next: Backend pools** at the bottom of the page. -12. In the **Backend pools** tab, select **+ Add a backend pool**. +13. In the **Backend pools** tab, select **+ Add a backend pool**. -13. Enter **myBackendPool** for **Name** in **Add backend pool**. +14. Enter **myBackendPool** for **Name** in **Add backend pool**. -14. Select **NIC** or **IP Address** for **Backend Pool Configuration**. +15. Select **NIC** or **IP Address** for **Backend Pool Configuration**. -15. Select **IPv4** or **IPv6** for **IP version**. +16. Select **IPv4** or **IPv6** for **IP version**. -16. Select **Add**. +17. Select **Add**. -17. Select the **Next: Inbound rules** button at the bottom of the page. +18. Select the **Next: Inbound rules** button at the bottom of the page. -18. In **Load balancing rule** in the **Inbound rules** tab, select **+ Add a load balancing rule**. +19. In **Load balancing rule** in the **Inbound rules** tab, select **+ Add a load balancing rule**. -19. In **Add load balancing rule**, enter or select the following information: +20. In **Add load balancing rule**, enter or select the following information: | Setting | Value | | ------- | ----- | | Name | Enter **myHTTPRule** | | IP Version | Select **IPv4** or **IPv6** depending on your requirements. | | Frontend IP address | Select **LoadBalancerFrontend**. | + | Backend pool | Select **myBackendPool**. | | Protocol | Select **TCP**. | | Port | Enter **80**. | | Backend port | Enter **80**. | - | Backend pool | Select **myBackendPool**. | | Health probe | Select **Create new**.
    In **Name**, enter **myHealthProbe**.
    Select **HTTP** in **Protocol**.
    Leave the rest of the defaults, and select **OK**. | | Session persistence | Select **None**. | | Idle timeout (minutes) | Enter or select **15**. | | TCP reset | Select **Enabled**. | | Floating IP | Select **Disabled**. | -20. Select **Add**. +21. Select **Add**. -21. Select the blue **Review + create** button at the bottom of the page. +22. Select the blue **Review + create** button at the bottom of the page. -22. Select **Create**. +23. Select **Create**. ## Create virtual machines @@ -275,25 +277,23 @@ In this section, you'll create a NAT gateway and assign it to the subnet in the In this section, we'll test the NAT gateway. We'll first discover the public IP of the NAT gateway. We'll then connect to the test virtual machine and verify the outbound connection through the NAT gateway. -1. Find the public IP address for the NAT gateway on the **Overview** screen. Select **All services** in the left-hand menu, select **All resources**, and then select **myPublicIP**. +1. Select **Resource groups** in the left-hand menu, select the **TutorIntLBNAT-rg** resource group, and then from the resources list, select **myNATgatewayIP**. 2. Make note of the public IP address: :::image type="content" source="./media/tutorial-nat-gateway-load-balancer-internal-portal/find-public-ip.png" alt-text="Screenshot of discover public IP address of NAT gateway." border="true"::: -3. Select **All services** in the left-hand menu, select **All resources**, and then from the resources list, select **myVM1** that is located in the **TutorIntLBNAT-rg** resource group. +3. Select **Resource groups** in the left-hand menu, select the **TutorIntLBNAT-rg** resource group, and then from the resources list, select **myVM1**. 4. On the **Overview** page, select **Connect**, then **Bastion**. -5. Select the blue **Use Bastion** button. - -6. Enter the username and password entered during VM creation. +5. Enter the username and password entered during VM creation. -7. Open **Internet Explorer** on **myVM1**. +6. Open **Internet Explorer** on **myVM1**. -8. Enter **https://whatsmyip.com** in the address bar. +7. Enter **https://whatsmyip.com** in the address bar. -9. Verify the IP address displayed matches the NAT gateway address you noted in the previous step: +8. Verify the IP address displayed matches the NAT gateway address you noted in the previous step: :::image type="content" source="./media/tutorial-nat-gateway-load-balancer-internal-portal/my-ip.png" alt-text="Screenshot of Internet Explorer showing external outbound IP." border="true"::: diff --git a/articles/virtual-network/nat-gateway/tutorial-nat-gateway-load-balancer-public-portal.md b/articles/virtual-network/nat-gateway/tutorial-nat-gateway-load-balancer-public-portal.md index 4cb9294dd9ce..4c2627b40a2f 100644 --- a/articles/virtual-network/nat-gateway/tutorial-nat-gateway-load-balancer-public-portal.md +++ b/articles/virtual-network/nat-gateway/tutorial-nat-gateway-load-balancer-public-portal.md @@ -7,7 +7,7 @@ ms.author: allensu ms.service: virtual-network ms.subservice: nat ms.topic: tutorial -ms.date: 03/19/2021 +ms.date: 05/24/2022 ms.custom: template-tutorial --- @@ -35,7 +35,7 @@ An Azure account with an active subscription. [Create an account for free](https In this section, you'll create a virtual network and subnet. -1. In the search box at the top of the portal, enter **Virtual network**. Select **Virtual Networks** in the search results. +1. In the search box at the top of the portal, enter **Virtual network**. 
Select **Virtual networks** in the search results. 2. In **Virtual networks**, select **+ Create**. @@ -69,7 +69,7 @@ In this section, you'll create a virtual network and subnet. 8. Select **Save**. -9. Select the **Security** tab. +9. Select the **Security** tab or select the **Next: Security** button at the bottom of the page. 10. Under **BastionHost**, select **Enable**. Enter this information: @@ -108,14 +108,14 @@ During the creation of the load balancer, you'll configure: | **Instance details** | | | Name | Enter **myLoadBalancer** | | Region | Select **(US) East US**. | - | Type | Select **Public**. | | SKU | Leave the default **Standard**. | + | Type | Select **Public**. | | Tier | Leave the default **Regional**. | 4. Select **Next: Frontend IP configuration** at the bottom of the page. -5. In **Frontend IP configuration**, select **+ Add a frontend IP**. +5. In **Frontend IP configuration**, select **+ Add a frontend IP configuration**. 6. Enter **LoadBalancerFrontend** in **Name**. @@ -169,10 +169,10 @@ During the creation of the load balancer, you'll configure: | Name | Enter **myHTTPRule** | | IP Version | Select **IPv4** or **IPv6** depending on your requirements. | | Frontend IP address | Select **LoadBalancerFrontend**. | + | Backend pool | Select **myBackendPool**. | | Protocol | Select **TCP**. | | Port | Enter **80**. | | Backend port | Enter **80**. | - | Backend pool | Select **myBackendPool**. | | Health probe | Select **Create new**.
    In **Name**, enter **myHealthProbe**.
    Select **HTTP** in **Protocol**.
    Leave the rest of the defaults, and select **OK**. | | Session persistence | Select **None**. | | Idle timeout (minutes) | Enter or select **15**. | @@ -290,25 +290,23 @@ In this section, you'll create a NAT gateway and assign it to the subnet in the In this section, we'll test the NAT gateway. We'll first discover the public IP of the NAT gateway. We'll then connect to the test virtual machine and verify the outbound connection through the NAT gateway. -1. Find the public IP address for the NAT gateway on the **Overview** screen. Select **All services** in the left-hand menu, select **All resources**, and then select **myPublicIP**. +1. Select **Resource groups** in the left-hand menu, select the **TutorPubLBNAT-rg** resource group, and then from the resources list, select **myNATgatewayIP**. 2. Make note of the public IP address: :::image type="content" source="./media/tutorial-nat-gateway-load-balancer-public-portal/find-public-ip.png" alt-text="Screenshot discover public IP address of NAT gateway." border="true"::: -3. Select **All services** in the left-hand menu, select **All resources**, and then from the resources list, select **myVM1** that is located in the **TutorPubLBNAT-rg** resource group. +3. Select **Resource groups** in the left-hand menu, select the **TutorPubLBNAT-rg** resource group, and then from the resources list, select **myVM1**. 4. On the **Overview** page, select **Connect**, then **Bastion**. -5. Select the blue **Use Bastion** button. - -6. Enter the username and password entered during VM creation. +5. Enter the username and password entered during VM creation. -7. Open **Internet Explorer** on **myVM1**. +6. Open **Internet Explorer** on **myVM1**. -8. Enter **https://whatsmyip.com** in the address bar. +7. Enter **https://whatsmyip.com** in the address bar. -9. Verify the IP address displayed matches the NAT gateway address you noted in the previous step: +8. Verify the IP address displayed matches the NAT gateway address you noted in the previous step: :::image type="content" source="./media/tutorial-nat-gateway-load-balancer-public-portal/my-ip.png" alt-text="Screenshot Internet Explorer showing external outbound IP." border="true"::: diff --git a/articles/virtual-wan/monitor-virtual-wan.md b/articles/virtual-wan/monitor-virtual-wan.md index 06f16defb208..f05d35e2b670 100644 --- a/articles/virtual-wan/monitor-virtual-wan.md +++ b/articles/virtual-wan/monitor-virtual-wan.md @@ -1,19 +1,17 @@ --- title: 'Monitoring Azure Virtual WAN' description: Learn about Azure Virtual WAN logs and metrics using Azure Monitor. -services: virtual-wan author: cherylmc - ms.service: virtual-wan ms.topic: how-to -ms.date: 06/30/2021 +ms.date: 05/25/2022 ms.author: cherylmc --- # Monitoring Virtual WAN -You can monitor Azure Virtual WAN using Azure Monitor. Virtual WAN is a networking service that brings together many networking, security, and routing functionalities to provide a single operational interface. Virtual WAN VPN gateways, ExpressRoute gateways, and Azure Firewall have logging and metrics available through Azure Monitor. +You can monitor Azure Virtual WAN using Azure Monitor. Virtual WAN is a networking service that brings together many networking, security, and routing functionalities to provide a single operational interface. Virtual WAN VPN gateways, ExpressRoute gateways, and Azure Firewall have logging and metrics available through Azure Monitor. This article discusses metrics and diagnostics that are available through the portal. 
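For context on the `$MetricInformation.Data` output that appears further down in this article, a hedged Azure PowerShell sketch of retrieving a gateway metric follows. The resource ID and metric name are assumptions and need to be adapted to your own Virtual WAN gateway.

```azurepowershell
# Sketch only: retrieve a site-to-site VPN gateway metric for the last hour at 5-minute granularity.
$gatewayId = "/subscriptions/<subscription-id>/resourceGroups/<resource-group>/providers/Microsoft.Network/vpnGateways/<gateway-name>"

$MetricInformation = Get-AzMetric -ResourceId $gatewayId -MetricName "TunnelAverageBandwidth" `
    -TimeGrain 00:05:00 -StartTime (Get-Date).AddHours(-1) -EndTime (Get-Date)

# Inspect the returned time series; the Average, Minimum, Maximum, and Total aggregations are described later in this article.
$MetricInformation.Data
```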
Metrics are lightweight and can support near real-time scenarios, making them useful for alerting and fast issue detection. @@ -29,7 +27,6 @@ Diagnostics and logging configuration must be done from there accessing the **Di :::image type="content" source="./media/monitor-virtual-wan/firewall-diagnostic-settings.png" alt-text="Screenshot shows Firewall diagnostic settings."::: - ## Metrics Metrics in Azure Monitor are numerical values that describe some aspect of a system at a particular time. Metrics are collected every minute, and are useful for alerting because they can be sampled frequently. An alert can be fired quickly with relatively simple logic. @@ -71,7 +68,7 @@ $MetricInformation.Data * Minimum – Minimum bytes that were sent during the selected time grain period. * Maximum – Maximum bytes that were sent during the selected time grain period * Total – Total bytes/sec that were sent during the selected time grain period. - + ### Site-to-site VPN gateways The following metrics are available for Azure site-to-site VPN gateways: @@ -129,14 +126,15 @@ The following metrics are available for Azure ExpressRoute gateways: | Metric | Description| | --- | --- | -| **BitsInPerSecond** | Bits per second ingressing Azure through the ExpressRoute Gateway. | -| **BitsOutPerSecond** | Bits per second egressing Azure through the ExpressRoute Gateway | -| **CPU Utilization** | CPU Utilization of the ExpressRoute Gateway.| -| **Packets per second** | Total Packets received on ExpressRoute Gateway per second.| -| **Count of routes advertised to peer**| Count of Routes Advertised to Peer by ExpressRoute Gateway. | -| **Count of routes learned from peer**| Count of Routes Learned from Peer by ExpressRoute Gateway.| -| **Frequency of routes changed** | Frequency of Route changes in ExpressRoute Gateway.| -| **Number of VMs in Virtual Network**| Number of VM's that use this ExpressRoute Gateway.| +| **BitsInPerSecond** | Bits per second ingressing Azure via ExpressRoute gateway which can be further split for specific connections. | +| **BitsOutPerSecond** | Bits per second egressing Azure via ExpressRoute gateway which can be further split for specific connection. | +| **Bits Received Per Second** | Total Bits received on ExpressRoute gateway per second. | +| **CPU Utilization** | CPU Utilization of the ExpressRoute gateway.| +| **Packets per second** | Total Packets received on ExpressRoute gateway per second.| +| **Count of routes advertised to peer**| Count of Routes Advertised to Peer by ExpressRoute gateway. | +| **Count of routes learned from peer**| Count of Routes Learned from Peer by ExpressRoute gateway.| +| **Frequency of routes changed** | Frequency of Route changes in ExpressRoute gateway.| +| **Number of VMs in Virtual Network**| Number of VMs that use this ExpressRoute gateway.| ### View gateway metrics @@ -221,8 +219,8 @@ In order to execute the query, you have to open the Log Analytics resource you c :::image type="content" source="./media/monitor-virtual-wan/log-analytics-query-samples.png" alt-text="Log Analytics Query Samples."::: -For additional Log Analytics query samples for Azure VPN Gateway, both Site-to-Site and Point-to-Site, you can visit the page [Troubleshoot Azure VPN Gateway using diagnostic logs](../vpn-gateway/troubleshoot-vpn-with-azure-diagnostics.md). -For Azure Firewall, a [workbook](../firewall/firewall-workbook.md) is provided to make log analysis easier. 
Using its graphical interface, it will be possible to investigate into the diagnostic data without manually writing any Log Analytics query. +For additional Log Analytics query samples for Azure VPN Gateway, both Site-to-Site and Point-to-Site, you can visit the page [Troubleshoot Azure VPN Gateway using diagnostic logs](../vpn-gateway/troubleshoot-vpn-with-azure-diagnostics.md). +For Azure Firewall, a [workbook](../firewall/firewall-workbook.md) is provided to make log analysis easier. Using its graphical interface, it will be possible to investigate into the diagnostic data without manually writing any Log Analytics query. ## Activity logs diff --git a/articles/virtual-wan/virtual-wan-expressroute-portal.md b/articles/virtual-wan/virtual-wan-expressroute-portal.md index b66ec1074eb2..b5a8328e70cd 100644 --- a/articles/virtual-wan/virtual-wan-expressroute-portal.md +++ b/articles/virtual-wan/virtual-wan-expressroute-portal.md @@ -1,12 +1,10 @@ --- title: 'Tutorial: Create ExpressRoute connections using Azure Virtual WAN' description: In this tutorial, learn how to use Azure Virtual WAN to create ExpressRoute connections to Azure and on-premises environments. -services: virtual-wan author: cherylmc - ms.service: virtual-wan ms.topic: tutorial -ms.date: 04/27/2021 +ms.date: 05/25/2022 ms.author: cherylmc # Customer intent: As someone with a networking background, I want to connect my corporate on-premises network(s) to my VNets using Virtual WAN and ExpressRoute. --- @@ -41,21 +39,7 @@ Verify that you have met the following criteria before beginning your configurat ## Create a virtual WAN -From a browser, navigate to the [Azure portal](https://portal.azure.com) and sign in with your Azure account. - -1. Navigate to the Virtual WAN page. In the portal, click **+Create a resource**. Type **Virtual WAN** into the search box and select Enter. -2. Select **Virtual WAN** from the results. On the Virtual WAN page, click **Create** to open the Create WAN page. -3. On the **Create WAN** page, on the **Basics** tab, fill in the following fields: - - :::image type="content" source="./media/virtual-wan-expressroute-portal/createwan.png" alt-text="Screenshot shows Create WAN page." border="false"::: - - * **Subscription** - Select the subscription that you want to use. - * **Resource Group** - Create new or use existing. - * **Resource group location** - Choose a resource location from the dropdown. A WAN is a global resource and does not live in a particular region. However, you must select a region in order to more easily manage and locate the WAN resource that you create. - * **Name** - Type the name that you want to call your WAN. - * **Type** - Select **Standard**. You can't create an ExpressRoute gateway using the Basic SKU. -4. After you finish filling out the fields, select **Review +Create**. -5. Once validation passes, select **Create** to create the virtual WAN. 
+[!INCLUDE [Create a virtual WAN](../../includes/virtual-wan-create-vwan-include.md)] ## Create a virtual hub and gateway diff --git a/articles/vpn-gateway/media/vpn-gateway-howto-point-to-site-resource-manager-portal/export-base-64-expand.png b/articles/vpn-gateway/media/vpn-gateway-howto-point-to-site-resource-manager-portal/export-base-64-expand.png new file mode 100644 index 000000000000..8b2d26de2312 Binary files /dev/null and b/articles/vpn-gateway/media/vpn-gateway-howto-point-to-site-resource-manager-portal/export-base-64-expand.png differ diff --git a/articles/vpn-gateway/media/vpn-gateway-howto-point-to-site-resource-manager-portal/export-base-64.png b/articles/vpn-gateway/media/vpn-gateway-howto-point-to-site-resource-manager-portal/export-base-64.png index 8b2d26de2312..a0283a52d5da 100644 Binary files a/articles/vpn-gateway/media/vpn-gateway-howto-point-to-site-resource-manager-portal/export-base-64.png and b/articles/vpn-gateway/media/vpn-gateway-howto-point-to-site-resource-manager-portal/export-base-64.png differ diff --git a/articles/vpn-gateway/media/vpn-gateway-howto-point-to-site-resource-manager-portal/notepad-root-cert-expand.png b/articles/vpn-gateway/media/vpn-gateway-howto-point-to-site-resource-manager-portal/notepad-root-cert-expand.png new file mode 100644 index 000000000000..be2bc9e9fba1 Binary files /dev/null and b/articles/vpn-gateway/media/vpn-gateway-howto-point-to-site-resource-manager-portal/notepad-root-cert-expand.png differ diff --git a/articles/vpn-gateway/media/vpn-gateway-howto-point-to-site-resource-manager-portal/notepad-root-cert.png b/articles/vpn-gateway/media/vpn-gateway-howto-point-to-site-resource-manager-portal/notepad-root-cert.png index be2bc9e9fba1..f4d35ef277a9 100644 Binary files a/articles/vpn-gateway/media/vpn-gateway-howto-point-to-site-resource-manager-portal/notepad-root-cert.png and b/articles/vpn-gateway/media/vpn-gateway-howto-point-to-site-resource-manager-portal/notepad-root-cert.png differ diff --git a/articles/vpn-gateway/point-to-site-about.md b/articles/vpn-gateway/point-to-site-about.md index f2132b2d8982..74e7df6ea2d7 100644 --- a/articles/vpn-gateway/point-to-site-about.md +++ b/articles/vpn-gateway/point-to-site-about.md @@ -42,7 +42,7 @@ The validation of the client certificate is performed by the VPN gateway and hap ### Authenticate using native Azure Active Directory authentication -Azure AD authentication allows users to connect to Azure using their Azure Active Directory credentials. Native Azure AD authentication is only supported for OpenVPN protocol and Windows 10 and 11 and also requires the use of the [Azure VPN Client](https://go.microsoft.com/fwlink/?linkid=2117554). +Azure AD authentication allows users to connect to Azure using their Azure Active Directory credentials. Native Azure AD authentication is only supported for OpenVPN protocol and Windows 10 and later and also requires the use of the [Azure VPN Client](https://go.microsoft.com/fwlink/?linkid=2117554). With native Azure AD authentication, you can leverage Azure AD's conditional access as well as Multi-Factor Authentication (MFA) features for VPN. diff --git a/articles/vpn-gateway/point-to-site-how-to-radius-ps.md b/articles/vpn-gateway/point-to-site-how-to-radius-ps.md index 735b9e590722..6b1199a76c86 100644 --- a/articles/vpn-gateway/point-to-site-how-to-radius-ps.md +++ b/articles/vpn-gateway/point-to-site-how-to-radius-ps.md @@ -22,7 +22,7 @@ A P2S VPN connection is started from Windows and Mac devices. 
Connecting clients * RADIUS server * VPN Gateway native certificate authentication -* Native Azure Active Directory authentication (Windows 10 only) +* Native Azure Active Directory authentication (Windows 10 and later only) This article helps you configure a P2S configuration with authentication using RADIUS server. If you want to authenticate using generated certificates and VPN gateway native certificate authentication instead, see [Configure a Point-to-Site connection to a VNet using VPN gateway native certificate authentication](vpn-gateway-howto-point-to-site-rm-ps.md) or [Create an Azure Active Directory tenant for P2S OpenVPN protocol connections](openvpn-azure-ad-tenant.md) for Azure Active Directory authentication. diff --git a/articles/vpn-gateway/site-to-site-vpn-private-peering.md b/articles/vpn-gateway/site-to-site-vpn-private-peering.md index b03601abad3d..cf68ffbd5791 100644 --- a/articles/vpn-gateway/site-to-site-vpn-private-peering.md +++ b/articles/vpn-gateway/site-to-site-vpn-private-peering.md @@ -7,7 +7,7 @@ author: cherylmc ms.service: vpn-gateway ms.topic: how-to -ms.date: 04/28/2021 +ms.date: 05/26/2022 ms.author: cherylmc --- @@ -22,7 +22,7 @@ You can configure a Site-to-Site VPN to a virtual network gateway over an Expres * It is possible to deploy Site-to-Site VPN connections over ExpressRoute private peering at the same time as Site-to-Site VPN connections via the Internet on the same VPN gateway. >[!NOTE] ->This feature is only supported on zone-redundant gateways. For example, VpnGw1AZ, VpnGw2AZ, etc. +>This feature is supported on gateways with a Standard Public IP only. > To complete this configuration, verify that you meet the following prerequisites: @@ -71,9 +71,7 @@ In both of these examples, Azure will send traffic to 10.0.1.0/24 over the VPN c ## Portal steps -1. Configure a Site-to-Site connection. For steps, see the [Site-to-site configuration](./tutorial-site-to-site-portal.md) article. Be sure to pick a zone-redundant gateway SKU for the gateway. - - Zone-redundant SKUs have “AZ” at the end of the SKU. For example, **VpnGw1AZ**. Zone-redundant gateways are only available in regions where the availability zone service is available. For information about the regions in which we support availability zones, see [Regions that support availability zones](../availability-zones/az-region.md). +1. Configure a Site-to-Site connection. For steps, see the [Site-to-site configuration](./tutorial-site-to-site-portal.md) article. Be sure to pick a gateway with a Standard Public IP. :::image type="content" source="media/site-to-site-vpn-private-peering/gateway.png" alt-text="Gateway Private IPs"::: 1. Enable Private IPs on the gateway. Select **Configuration**, then set **Gateway Private IPs** to **Enabled**. Select **Save** to save your changes. @@ -87,7 +85,7 @@ In both of these examples, Azure will send traffic to 10.0.1.0/24 over the VPN c ## PowerShell steps -1. Configure a Site-to-Site connection. For steps, see the [Configure a Site-to-Site VPN](./tutorial-site-to-site-portal.md) article. Be sure to pick a zone-redundant gateway SKU for the gateway. Zone-redundant SKUs have “AZ” at the end of the SKU. For example, VpnGw1AZ. +1. Configure a Site-to-Site connection. For steps, see the [Configure a Site-to-Site VPN](./tutorial-site-to-site-portal.md) article. Be sure to pick a gateway with a Standard Public IP. 1. 
Set the flag to use the private IP on the gateway using the following PowerShell commands: ```azurepowershell-interactive diff --git a/articles/vpn-gateway/vpn-gateway-certificates-point-to-site-makecert.md b/articles/vpn-gateway/vpn-gateway-certificates-point-to-site-makecert.md index e1c720bb42f5..4c7bdecb0ef5 100644 --- a/articles/vpn-gateway/vpn-gateway-certificates-point-to-site-makecert.md +++ b/articles/vpn-gateway/vpn-gateway-certificates-point-to-site-makecert.md @@ -15,7 +15,7 @@ ms.author: cherylmc Point-to-Site connections use certificates to authenticate. This article shows you how to create a self-signed root certificate and generate client certificates using MakeCert. If you are looking for different certificate instructions, see [Certificates - PowerShell](vpn-gateway-certificates-point-to-site.md) or [Certificates - Linux](vpn-gateway-certificates-point-to-site-linux.md). -While we recommend using the [Windows 10 PowerShell steps](vpn-gateway-certificates-point-to-site.md) to create your certificates, we provide these MakeCert instructions as an optional method. The certificates that you generate using either method can be installed on [any supported client operating system](vpn-gateway-howto-point-to-site-resource-manager-portal.md#faq). However, MakeCert has the following limitation: +While we recommend using the [Windows 10 or later PowerShell steps](vpn-gateway-certificates-point-to-site.md) to create your certificates, we provide these MakeCert instructions as an optional method. The certificates that you generate using either method can be installed on [any supported client operating system](vpn-gateway-howto-point-to-site-resource-manager-portal.md#faq). However, MakeCert has the following limitation: * MakeCert is deprecated. This means that this tool could be removed at any point. Any certificates that you already generated using MakeCert won't be affected when MakeCert is no longer available. MakeCert is only used to generate the certificates, not as a validating mechanism. diff --git a/articles/vpn-gateway/vpn-gateway-certificates-point-to-site.md b/articles/vpn-gateway/vpn-gateway-certificates-point-to-site.md index f9dc1004e915..311975444f14 100644 --- a/articles/vpn-gateway/vpn-gateway-certificates-point-to-site.md +++ b/articles/vpn-gateway/vpn-gateway-certificates-point-to-site.md @@ -13,11 +13,11 @@ ms.author: cherylmc --- # Generate and export certificates for Point-to-Site using PowerShell -Point-to-Site connections use certificates to authenticate. This article shows you how to create a self-signed root certificate and generate client certificates using PowerShell on Windows 10 or Windows Server 2016. If you are looking for different certificate instructions, see [Certificates - Linux](vpn-gateway-certificates-point-to-site-linux.md) or [Certificates - MakeCert](vpn-gateway-certificates-point-to-site-makecert.md). +Point-to-Site connections use certificates to authenticate. This article shows you how to create a self-signed root certificate and generate client certificates using PowerShell on Windows 10 or later, or Windows Server 2016. If you are looking for different certificate instructions, see [Certificates - Linux](vpn-gateway-certificates-point-to-site-linux.md) or [Certificates - MakeCert](vpn-gateway-certificates-point-to-site-makecert.md). -The steps in this article apply to Windows 10 or Windows Server 2016. 
The PowerShell cmdlets that you use to generate certificates are part of the operating system and do not work on other versions of Windows. The Windows 10 or Windows Server 2016 computer is only needed to generate the certificates. Once the certificates are generated, you can upload them, or install them on any supported client operating system. +The steps in this article apply to Windows 10 or later, or Windows Server 2016. The PowerShell cmdlets that you use to generate certificates are part of the operating system and do not work on other versions of Windows. The Windows 10 or later, or Windows Server 2016 computer is only needed to generate the certificates. Once the certificates are generated, you can upload them, or install them on any supported client operating system. -If you do not have access to a Windows 10 or Windows Server 2016 computer, you can use [MakeCert](vpn-gateway-certificates-point-to-site-makecert.md) to generate certificates. The certificates that you generate using either method can be installed on any [supported](vpn-gateway-howto-point-to-site-resource-manager-portal.md#faq) client operating system. +If you do not have access to a Windows 10 or later, or Windows Server 2016 computer, you can use [MakeCert](vpn-gateway-certificates-point-to-site-makecert.md) to generate certificates. The certificates that you generate using either method can be installed on any [supported](vpn-gateway-howto-point-to-site-resource-manager-portal.md#faq) client operating system. [!INCLUDE [generate and export certificates](../../includes/vpn-gateway-generate-export-certificates-include.md)] diff --git a/articles/vpn-gateway/vpn-gateway-howto-always-on-device-tunnel.md b/articles/vpn-gateway/vpn-gateway-howto-always-on-device-tunnel.md index f3341d2e37c3..4b607d1747d3 100644 --- a/articles/vpn-gateway/vpn-gateway-howto-always-on-device-tunnel.md +++ b/articles/vpn-gateway/vpn-gateway-howto-always-on-device-tunnel.md @@ -1,7 +1,7 @@ --- title: 'Configure an Always-On VPN tunnel' titleSuffix: Azure VPN Gateway -description: Learn how to use gateways with Windows 10 Always On to establish and configure persistent device tunnels to Azure. +description: Learn how to use gateways with Windows 10 or later Always On to establish and configure persistent device tunnels to Azure. services: vpn-gateway author: cherylmc diff --git a/articles/vpn-gateway/vpn-gateway-howto-point-to-site-classic-azure-portal.md b/articles/vpn-gateway/vpn-gateway-howto-point-to-site-classic-azure-portal.md index 730185267924..e14f55707d18 100644 --- a/articles/vpn-gateway/vpn-gateway-howto-point-to-site-classic-azure-portal.md +++ b/articles/vpn-gateway/vpn-gateway-howto-point-to-site-classic-azure-portal.md @@ -96,7 +96,7 @@ If you already have a VNet, verify that the settings are compatible with your VP Azure uses certificates to authenticate VPN clients for Point-to-Site VPNs. You upload the public key information of the root certificate to Azure. The public key is then considered *trusted*. Client certificates must be generated from the trusted root certificate, and then installed on each client computer in the Certificates-Current User\Personal\Certificates certificate store. The certificate is used to authenticate the client when it connects to the VNet. -If you use self-signed certificates, they must be created by using specific parameters. 
You can create a self-signed certificate by using the instructions for [PowerShell and Windows 10](vpn-gateway-certificates-point-to-site.md), or [MakeCert](vpn-gateway-certificates-point-to-site-makecert.md). It's important to follow the steps in these instructions when you use self-signed root certificates and generate client certificates from the self-signed root certificate. Otherwise, the certificates you create won't be compatible with P2S connections and you'll receive a connection error. +If you use self-signed certificates, they must be created by using specific parameters. You can create a self-signed certificate by using the instructions for [PowerShell and Windows 10 or later](vpn-gateway-certificates-point-to-site.md), or [MakeCert](vpn-gateway-certificates-point-to-site-makecert.md). It's important to follow the steps in these instructions when you use self-signed root certificates and generate client certificates from the self-signed root certificate. Otherwise, the certificates you create won't be compatible with P2S connections and you'll receive a connection error. ### Acquire the public key (.cer) for the root certificate diff --git a/articles/vpn-gateway/vpn-gateway-howto-point-to-site-resource-manager-portal.md b/articles/vpn-gateway/vpn-gateway-howto-point-to-site-resource-manager-portal.md index 3daafc90d6c8..27dff45a6f6e 100644 --- a/articles/vpn-gateway/vpn-gateway-howto-point-to-site-resource-manager-portal.md +++ b/articles/vpn-gateway/vpn-gateway-howto-point-to-site-resource-manager-portal.md @@ -1,19 +1,17 @@ --- title: 'Connect to a VNet using P2S VPN & certificate authentication: portal' titleSuffix: Azure VPN Gateway -description: Learn how to connect Windows, macOS, and Linux clients securely to a VNet using VPN Gateway Point-to-Site connections and self-signed or CA issued certificates. -services: vpn-gateway +description: Learn how to connect Windows, macOS, and Linux clients securely to a VNet using VPN Gateway point-to-site connections and self-signed or CA issued certificates. author: cherylmc - ms.service: vpn-gateway ms.topic: how-to -ms.date: 04/20/2022 +ms.date: 05/26/2022 ms.author: cherylmc --- -# Configure a Point-to-Site VPN connection using Azure certificate authentication: Azure portal +# Configure a point-to-site VPN connection using Azure certificate authentication: Azure portal -This article helps you securely connect individual clients running Windows, Linux, or macOS to an Azure VNet. Point-to-Site VPN connections are useful when you want to connect to your VNet from a remote location, such when you're telecommuting from home or a conference. You can also use P2S instead of a Site-to-Site VPN when you have only a few clients that need to connect to a VNet. Point-to-Site connections don't require a VPN device or a public-facing IP address. P2S creates the VPN connection over either SSTP (Secure Socket Tunneling Protocol), or IKEv2. For more information about Point-to-Site VPN, see [About Point-to-Site VPN](point-to-site-about.md). +This article helps you securely connect individual clients running Windows, Linux, or macOS to an Azure VNet. point-to-site VPN connections are useful when you want to connect to your VNet from a remote location, such when you're telecommuting from home or a conference. You can also use P2S instead of a Site-to-Site VPN when you have only a few clients that need to connect to a VNet. point-to-site connections don't require a VPN device or a public-facing IP address. 
P2S creates the VPN connection over either SSTP (Secure Socket Tunneling Protocol), or IKEv2. For more information about point-to-site VPN, see [About point-to-site VPN](point-to-site-about.md). :::image type="content" source="./media/vpn-gateway-howto-point-to-site-resource-manager-portal/point-to-site-diagram.png" alt-text="Connect from a computer to an Azure VNet - point-to-site connection diagram."::: @@ -52,7 +50,7 @@ You can use the following values to create a test environment, or refer to these **Connection type and client address pool** * **Connection type:** Point-to-site -* **Client address pool:** 172.16.201.0/24
    VPN clients that connect to the VNet using this Point-to-Site connection receive an IP address from the client address pool. +* **Client address pool:** 172.16.201.0/24
    VPN clients that connect to the VNet using this point-to-site connection receive an IP address from the client address pool. ## Create a VNet @@ -60,7 +58,7 @@ In this section, you create a virtual network. [!INCLUDE [About cross-premises addresses](../../includes/vpn-gateway-cross-premises.md)] -[!INCLUDE [Basic Point-to-Site VNet](../../includes/vpn-gateway-basic-vnet-rm-portal-include.md)] +[!INCLUDE [Basic point-to-site VNet](../../includes/vpn-gateway-basic-vnet-rm-portal-include.md)] ## Create the VPN gateway @@ -81,7 +79,7 @@ You can see the deployment status on the Overview page for your gateway. After t ## Generate certificates -Certificates are used by Azure to authenticate clients connecting to a VNet over a Point-to-Site VPN connection. Once you obtain a root certificate, you [upload](#uploadfile) the public key information to Azure. The root certificate is then considered 'trusted' by Azure for connection over P2S to the virtual network. You also generate client certificates from the trusted root certificate, and then install them on each client computer. The client certificate is used to authenticate the client when it initiates a connection to the VNet. +Certificates are used by Azure to authenticate clients connecting to a VNet over a point-to-site VPN connection. Once you obtain a root certificate, you [upload](#uploadfile) the public key information to Azure. The root certificate is then considered 'trusted' by Azure for connection over P2S to the virtual network. You also generate client certificates from the trusted root certificate, and then install them on each client computer. The client certificate is used to authenticate the client when it initiates a connection to the VNet. ### Generate a root certificate @@ -93,7 +91,7 @@ Certificates are used by Azure to authenticate clients connecting to a VNet over ## Add the VPN client address pool -The client address pool is a range of private IP addresses that you specify. The clients that connect over a Point-to-Site VPN dynamically receive an IP address from this range. Use a private IP address range that doesn't overlap with the on-premises location that you connect from, or the VNet that you want to connect to. If you configure multiple protocols and SSTP is one of the protocols, then the configured address pool is split between the configured protocols equally. +The client address pool is a range of private IP addresses that you specify. The clients that connect over a point-to-site VPN dynamically receive an IP address from this range. Use a private IP address range that doesn't overlap with the on-premises location that you connect from, or the VNet that you want to connect to. If you configure multiple protocols and SSTP is one of the protocols, then the configured address pool is split between the configured protocols equally. 1. Once the virtual network gateway has been created, navigate to the **Settings** section of the virtual network gateway page. In **Settings**, select **Point-to-site configuration**. Select **Configure now** to open the configuration page. @@ -126,10 +124,10 @@ In this section, you upload public root certificate data to Azure. Once the publ 1. Navigate to your **Virtual network gateway -> Point-to-site configuration** page in the **Root certificate** section. This section is only visible if you have selected **Azure certificate** for the authentication type. 1. Make sure that you exported the root certificate as a **Base-64 encoded X.509 (.CER)** file in the previous steps. 
You need to export the certificate in this format so you can open the certificate with text editor. You don't need to export the private key. - :::image type="content" source="./media/vpn-gateway-howto-point-to-site-resource-manager-portal/export-base-64.png" alt-text="Screenshot showing export as Base-64 encoded X.509." lightbox="./media/vpn-gateway-howto-point-to-site-resource-manager-portal/export-base-64.png" ::: + :::image type="content" source="./media/vpn-gateway-howto-point-to-site-resource-manager-portal/export-base-64.png" alt-text="Screenshot showing export as Base-64 encoded X.509." lightbox="./media/vpn-gateway-howto-point-to-site-resource-manager-portal/export-base-64-expand.png" ::: 1. Open the certificate with a text editor, such as Notepad. When copying the certificate data, make sure that you copy the text as one continuous line without carriage returns or line feeds. You may need to modify your view in the text editor to 'Show Symbol/Show all characters' to see the carriage returns and line feeds. Copy only the following section as one continuous line: - :::image type="content" source="./media/vpn-gateway-howto-point-to-site-resource-manager-portal/notepad-root-cert.png" alt-text="Screenshot showing root certificate information in Notepad." border="false" lightbox="./media/vpn-gateway-howto-point-to-site-resource-manager-portal/notepad-root-cert.png"::: + :::image type="content" source="./media/vpn-gateway-howto-point-to-site-resource-manager-portal/notepad-root-cert.png" alt-text="Screenshot showing root certificate information in Notepad." border="false" lightbox="./media/vpn-gateway-howto-point-to-site-resource-manager-portal/notepad-root-cert-expand.png"::: 1. In the **Root certificate** section, you can add up to 20 trusted root certificates. * Paste the certificate data into the **Public certificate data** field. @@ -175,7 +173,7 @@ If you're having trouble connecting, verify that the virtual network gateway isn These instructions apply to Windows clients. 1. To verify that your VPN connection is active, open an elevated command prompt, and run *ipconfig/all*. -2. View the results. Notice that the IP address you received is one of the addresses within the Point-to-Site VPN Client Address Pool that you specified in your configuration. The results are similar to this example: +2. View the results. Notice that the IP address you received is one of the addresses within the point-to-site VPN Client Address Pool that you specified in your configuration. The results are similar to this example: ``` PPP adapter VNet1: @@ -214,7 +212,7 @@ To remove a trusted root certificate: ## To revoke a client certificate -You can revoke client certificates. The certificate revocation list allows you to selectively deny Point-to-Site connectivity based on individual client certificates. This is different than removing a trusted root certificate. If you remove a trusted root certificate .cer from Azure, it revokes the access for all client certificates generated/signed by the revoked root certificate. Revoking a client certificate, rather than the root certificate, allows the other certificates that were generated from the root certificate to continue to be used for authentication. +You can revoke client certificates. The certificate revocation list allows you to selectively deny point-to-site connectivity based on individual client certificates. This is different than removing a trusted root certificate. 
If you remove a trusted root certificate .cer from Azure, it revokes the access for all client certificates generated/signed by the revoked root certificate. Revoking a client certificate, rather than the root certificate, allows the other certificates that were generated from the root certificate to continue to be used for authentication. The common practice is to use the root certificate to manage access at team or organization levels, while using revoked client certificates for fine-grained access control on individual users. @@ -228,7 +226,7 @@ You can revoke a client certificate by adding the thumbprint to the revocation l 1. The thumbprint validates and is automatically added to the revocation list. A message appears on the screen that the list is updating. 1. After updating has completed, the certificate can no longer be used to connect. Clients that try to connect using this certificate receive a message saying that the certificate is no longer valid. -## Point-to-Site FAQ +## Point-to-site FAQ For frequently asked questions, see the [FAQ](vpn-gateway-vpn-faq.md#P2S). diff --git a/articles/vpn-gateway/vpn-gateway-howto-point-to-site-rm-ps.md b/articles/vpn-gateway/vpn-gateway-howto-point-to-site-rm-ps.md index f4aa093d1cc8..4b817f245195 100644 --- a/articles/vpn-gateway/vpn-gateway-howto-point-to-site-rm-ps.md +++ b/articles/vpn-gateway/vpn-gateway-howto-point-to-site-rm-ps.md @@ -145,7 +145,7 @@ Set-AzVirtualNetworkGateway -VirtualNetworkGateway $Gateway -VpnClientAddressPoo Certificates are used by Azure to authenticate VPN clients for point-to-site VPNs. You upload the public key information of the root certificate to Azure. The public key is then considered 'trusted'. Client certificates must be generated from the trusted root certificate, and then installed on each client computer in the Certificates-Current User/Personal certificate store. The certificate is used to authenticate the client when it initiates a connection to the VNet. -If you use self-signed certificates, they must be created using specific parameters. You can create a self-signed certificate using the instructions for [PowerShell and Windows 10](vpn-gateway-certificates-point-to-site.md), or, if you don't have Windows 10, you can use [MakeCert](vpn-gateway-certificates-point-to-site-makecert.md). It's important that you follow the steps in the instructions when generating self-signed root certificates and client certificates. Otherwise, the certificates you generate will not be compatible with P2S connections and you receive a connection error. +If you use self-signed certificates, they must be created using specific parameters. You can create a self-signed certificate using the instructions for [PowerShell and Windows 10 or later](vpn-gateway-certificates-point-to-site.md), or, if you don't have Windows 10 or later, you can use [MakeCert](vpn-gateway-certificates-point-to-site-makecert.md). It's important that you follow the steps in the instructions when generating self-signed root certificates and client certificates. Otherwise, the certificates you generate will not be compatible with P2S connections and you receive a connection error. ### Root certificate @@ -161,7 +161,7 @@ If you use self-signed certificates, they must be created using specific paramet ## Upload root certificate public key information -Verify that your VPN gateway has finished creating. Once it has completed, you can upload the .cer file (which contains the public key information) for a trusted root certificate to Azure. 
Once a.cer file is uploaded, Azure can use it to authenticate clients that have installed a client certificate generated from the trusted root certificate. You can upload additional trusted root certificate files - up to a total of 20 - later, if needed. +Verify that your VPN gateway has finished creating. Once it has completed, you can upload the .cer file (which contains the public key information) for a trusted root certificate to Azure. Once a .cer file is uploaded, Azure can use it to authenticate clients that have installed a client certificate generated from the trusted root certificate. You can upload additional trusted root certificate files - up to a total of 20 - later, if needed. >[!NOTE] > You can't upload the .cer file using Azure Cloud Shell. You can either use PowerShell locally on your computer, or you can use the [Azure portal steps](vpn-gateway-howto-point-to-site-resource-manager-portal.md#uploadfile). @@ -384,4 +384,4 @@ For additional point-to-site information, see the [VPN Gateway point-to-site FAQ Once your connection is complete, you can add virtual machines to your virtual networks. For more information, see [Virtual Machines](../index.yml). To understand more about networking and virtual machines, see [Azure and Linux VM network overview](../virtual-network/network-overview.md). -For P2S troubleshooting information, [Troubleshooting: Azure point-to-site connection problems](vpn-gateway-troubleshoot-vpn-point-to-site-connection-problems.md). \ No newline at end of file +For P2S troubleshooting information, see [Troubleshooting: Azure point-to-site connection problems](vpn-gateway-troubleshoot-vpn-point-to-site-connection-problems.md). diff --git a/articles/vpn-gateway/vpn-gateway-troubleshoot-vpn-point-to-site-connection-problems.md b/articles/vpn-gateway/vpn-gateway-troubleshoot-vpn-point-to-site-connection-problems.md index 947c7eddc9fb..7c007ab2cf54 100644 --- a/articles/vpn-gateway/vpn-gateway-troubleshoot-vpn-point-to-site-connection-problems.md +++ b/articles/vpn-gateway/vpn-gateway-troubleshoot-vpn-point-to-site-connection-problems.md @@ -61,7 +61,7 @@ When you try and connect to an Azure virtual network gateway using IKEv2 on Wind IKEv2 is supported on Windows 10 and Server 2016. However, in order to use IKEv2, you must install updates and set a registry key value locally. OS versions prior to Windows 10 are not supported and can only use SSTP. -To prepare Windows 10 or Server 2016 for IKEv2: +To prepare Windows 10 or later, or Server 2016 for IKEv2: 1. Install the update. diff --git a/articles/vpn-gateway/vpn-gateway-vpn-faq.md b/articles/vpn-gateway/vpn-gateway-vpn-faq.md index 03ea13ffcceb..31c7cbf8b83f 100644 --- a/articles/vpn-gateway/vpn-gateway-vpn-faq.md +++ b/articles/vpn-gateway/vpn-gateway-vpn-faq.md @@ -6,7 +6,7 @@ author: cherylmc ms.service: vpn-gateway ms.topic: conceptual -ms.date: 12/16/2021 +ms.date: 05/25/2022 ms.author: cherylmc --- # VPN Gateway FAQ @@ -15,7 +15,7 @@ ms.author: cherylmc ### Can I connect virtual networks in different Azure regions? -Yes. There is no region constraint. One virtual network can connect to another virtual network in the same region, or in a different Azure region. +Yes. There's no region constraint. One virtual network can connect to another virtual network in the same region, or in a different Azure region. ### Can I connect virtual networks in different subscriptions? @@ -37,21 +37,21 @@ No.
The following cross-premises virtual network gateway connections are supported: -* **Site-to-Site:** VPN connection over IPsec (IKE v1 and IKE v2). This type of connection requires a VPN device or RRAS. For more information, see [Site-to-Site](./tutorial-site-to-site-portal.md). -* **Point-to-Site:** VPN connection over SSTP (Secure Socket Tunneling Protocol) or IKE v2. This connection does not require a VPN device. For more information, see [Point-to-Site](vpn-gateway-howto-point-to-site-resource-manager-portal.md). -* **VNet-to-VNet:** This type of connection is the same as a Site-to-Site configuration. VNet to VNet is a VPN connection over IPsec (IKE v1 and IKE v2). It does not require a VPN device. For more information, see [VNet-to-VNet](vpn-gateway-howto-vnet-vnet-resource-manager-portal.md). -* **Multi-Site:** This is a variation of a Site-to-Site configuration that allows you to connect multiple on-premises sites to a virtual network. For more information, see [Multi-Site](vpn-gateway-howto-multi-site-to-site-resource-manager-portal.md). +* **Site-to-site:** VPN connection over IPsec (IKE v1 and IKE v2). This type of connection requires a VPN device or RRAS. For more information, see [Site-to-site](./tutorial-site-to-site-portal.md). +* **Point-to-site:** VPN connection over SSTP (Secure Socket Tunneling Protocol) or IKE v2. This connection doesn't require a VPN device. For more information, see [Point-to-site](vpn-gateway-howto-point-to-site-resource-manager-portal.md). +* **VNet-to-VNet:** This type of connection is the same as a site-to-site configuration. VNet to VNet is a VPN connection over IPsec (IKE v1 and IKE v2). It doesn't require a VPN device. For more information, see [VNet-to-VNet](vpn-gateway-howto-vnet-vnet-resource-manager-portal.md). +* **Multi-Site:** This is a variation of a site-to-site configuration that allows you to connect multiple on-premises sites to a virtual network. For more information, see [Multi-Site](vpn-gateway-howto-multi-site-to-site-resource-manager-portal.md). * **ExpressRoute:** ExpressRoute is a private connection to Azure from your WAN, not a VPN connection over the public Internet. For more information, see the [ExpressRoute Technical Overview](../expressroute/expressroute-introduction.md) and the [ExpressRoute FAQ](../expressroute/expressroute-faqs.md). For more information about VPN Gateway connections, see [About VPN Gateway](vpn-gateway-about-vpngateways.md). -### What is the difference between a Site-to-Site connection and Point-to-Site? +### What is the difference between a site-to-site connection and point-to-site? -**Site-to-Site** (IPsec/IKE VPN tunnel) configurations are between your on-premises location and Azure. This means that you can connect from any of your computers located on your premises to any virtual machine or role instance within your virtual network, depending on how you choose to configure routing and permissions. It's a great option for an always-available cross-premises connection and is well suited for hybrid configurations. This type of connection relies on an IPsec VPN appliance (hardware device or soft appliance), which must be deployed at the edge of your network. To create this type of connection, you must have an externally facing IPv4 address. +**Site-to-site** (IPsec/IKE VPN tunnel) configurations are between your on-premises location and Azure. 
This means that you can connect from any of your computers located on your premises to any virtual machine or role instance within your virtual network, depending on how you choose to configure routing and permissions. It's a great option for an always-available cross-premises connection and is well suited for hybrid configurations. This type of connection relies on an IPsec VPN appliance (hardware device or soft appliance), which must be deployed at the edge of your network. To create this type of connection, you must have an externally facing IPv4 address. -**Point-to-Site** (VPN over SSTP) configurations let you connect from a single computer from anywhere to anything located in your virtual network. It uses the Windows in-box VPN client. As part of the Point-to-Site configuration, you install a certificate and a VPN client configuration package, which contains the settings that allow your computer to connect to any virtual machine or role instance within the virtual network. It's great when you want to connect to a virtual network, but aren't located on-premises. It's also a good option when you don't have access to VPN hardware or an externally facing IPv4 address, both of which are required for a Site-to-Site connection. +**Point-to-site** (VPN over SSTP) configurations let you connect from a single computer from anywhere to anything located in your virtual network. It uses the Windows in-box VPN client. As part of the point-to-site configuration, you install a certificate and a VPN client configuration package, which contains the settings that allow your computer to connect to any virtual machine or role instance within the virtual network. It's great when you want to connect to a virtual network, but aren't located on-premises. It's also a good option when you don't have access to VPN hardware or an externally facing IPv4 address, both of which are required for a site-to-site connection. -You can configure your virtual network to use both Site-to-Site and Point-to-Site concurrently, as long as you create your Site-to-Site connection using a route-based VPN type for your gateway. Route-based VPN types are called dynamic gateways in the classic deployment model. +You can configure your virtual network to use both site-to-site and point-to-site concurrently, as long as you create your site-to-site connection using a route-based VPN type for your gateway. Route-based VPN types are called dynamic gateways in the classic deployment model. ## Privacy @@ -81,7 +81,7 @@ The custom configured traffic selectors will be proposed only when an Azure VPN ### Can I update my policy-based VPN gateway to route-based? -No. A gateway type cannot be changed from policy-based to route-based, or from route-based to policy-based. To change a gateway type, the gateway must be deleted and recreated. This process takes about 60 minutes. When you create the new gateway, you cannot retain the IP address of the original gateway. +No. A gateway type can't be changed from policy-based to route-based, or from route-based to policy-based. To change a gateway type, the gateway must be deleted and recreated. This process takes about 60 minutes. When you create the new gateway, you can't retain the IP address of the original gateway. 1. Delete any connections associated with the gateway. @@ -90,7 +90,7 @@ No. 
A gateway type cannot be changed from policy-based to route-based, or from r * [Azure portal](vpn-gateway-delete-vnet-gateway-portal.md) * [Azure PowerShell](vpn-gateway-delete-vnet-gateway-powershell.md) * [Azure PowerShell - classic](vpn-gateway-delete-vnet-gateway-classic-powershell.md) -1. Create a new gateway using the gateway type that you want, and then complete the VPN setup. For steps, see the [Site-to-Site tutorial](./tutorial-site-to-site-portal.md#VNetGateway). +1. Create a new gateway using the gateway type that you want, and then complete the VPN setup. For steps, see the [Site-to-site tutorial](./tutorial-site-to-site-portal.md#VNetGateway). ### Do I need a 'GatewaySubnet'? @@ -104,15 +104,15 @@ No. ### Can I get my VPN gateway IP address before I create it? -Zone-redundant and zonal gateways (gateway SKUs that have _AZ_ in the name) both rely on a _Standard SKU_ Azure public IP resource. Azure Standard SKU public IP resources must use a static allocation method. Therefore, you will have the public IP address for your VPN gateway as soon as you create the Standard SKU public IP resource you intend to use for it. +Zone-redundant and zonal gateways (gateway SKUs that have _AZ_ in the name) both rely on a _Standard SKU_ Azure public IP resource. Azure Standard SKU public IP resources must use a static allocation method. Therefore, you'll have the public IP address for your VPN gateway as soon as you create the Standard SKU public IP resource you intend to use for it. -For non-zone-redundant and non-zonal gateways (gateway SKUs that do _not_ have _AZ_ in the name), you cannot obtain the VPN gateway IP address before it is created. The IP address changes only if you delete and re-create your VPN gateway. +For non-zone-redundant and non-zonal gateways (gateway SKUs that do *not* have *AZ* in the name), you can't obtain the VPN gateway IP address before it's created. The IP address changes only if you delete and re-create your VPN gateway. ### Can I request a Static Public IP address for my VPN gateway? -Zone-redundant and zonal gateways (gateway SKUs that have _AZ_ in the name) both rely on a _Standard SKU_ Azure public IP resource. Azure Standard SKU public IP resources must use a static allocation method. +Zone-redundant and zonal gateways (gateway SKUs that have *AZ* in the name) both rely on a *Standard SKU* Azure public IP resource. Azure Standard SKU public IP resources must use a static allocation method. -For non-zone-redundant and non-zonal gateways (gateway SKUs that do _not_ have _AZ_ in the name), only dynamic IP address assignment is supported. However, this doesn't mean that the IP address changes after it has been assigned to your VPN gateway. The only time the VPN gateway IP address changes is when the gateway is deleted and then re-created. The VPN gateway public IP address doesn't change when you resize, reset, or complete other internal maintenance and upgrades of your VPN gateway. +For non-zone-redundant and non-zonal gateways (gateway SKUs that do *not* have *AZ* in the name), only dynamic IP address assignment is supported. However, this doesn't mean that the IP address changes after it has been assigned to your VPN gateway. The only time the VPN gateway IP address changes is when the gateway is deleted and then re-created. The VPN gateway public IP address doesn't change when you resize, reset, or complete other internal maintenance and upgrades of your VPN gateway. ### How does my VPN tunnel get authenticated? 
@@ -124,7 +124,7 @@ Yes, the Set Pre-Shared Key API and PowerShell cmdlet can be used to configure b ### Can I use other authentication options? -We are limited to using pre-shared keys (PSK) for authentication. +We're limited to using pre-shared keys (PSK) for authentication. ### How do I specify which traffic goes through the VPN gateway? @@ -147,19 +147,19 @@ Yes, you can deploy your own VPN gateways or servers in Azure either from the Az ### Why are certain ports opened on my virtual network gateway? -They are required for Azure infrastructure communication. They are protected (locked down) by Azure certificates. Without proper certificates, external entities, including the customers of those gateways, will not be able to cause any effect on those endpoints. +They're required for Azure infrastructure communication. They're protected (locked down) by Azure certificates. Without proper certificates, external entities, including the customers of those gateways, won't be able to cause any effect on those endpoints. -A virtual network gateway is fundamentally a multi-homed device with one NIC tapping into the customer private network, and one NIC facing the public network. Azure infrastructure entities cannot tap into customer private networks for compliance reasons, so they need to utilize public endpoints for infrastructure communication. The public endpoints are periodically scanned by Azure security audit. +A virtual network gateway is fundamentally a multi-homed device with one NIC tapping into the customer private network, and one NIC facing the public network. Azure infrastructure entities can't tap into customer private networks for compliance reasons, so they need to utilize public endpoints for infrastructure communication. The public endpoints are periodically scanned by Azure security audit. ### More information about gateway types, requirements, and throughput For more information, see [About VPN Gateway configuration settings](vpn-gateway-about-vpn-gateway-settings.md). -## Site-to-Site connections and VPN devices +## Site-to-site connections and VPN devices ### What should I consider when selecting a VPN device? -We have validated a set of standard Site-to-Site VPN devices in partnership with device vendors. A list of known compatible VPN devices, their corresponding configuration instructions or samples, and device specs can be found in the [About VPN devices](vpn-gateway-about-vpn-devices.md) article. All devices in the device families listed as known compatible should work with Virtual Network. To help configure your VPN device, refer to the device configuration sample or link that corresponds to appropriate device family. +We've validated a set of standard site-to-site VPN devices in partnership with device vendors. A list of known compatible VPN devices, their corresponding configuration instructions or samples, and device specs can be found in the [About VPN devices](vpn-gateway-about-vpn-devices.md) article. All devices in the device families listed as known compatible should work with Virtual Network. To help configure your VPN device, refer to the device configuration sample or link that corresponds to appropriate device family. ### Where can I find VPN device configuration settings? @@ -179,21 +179,21 @@ This is expected behavior for policy-based (also known as static routing) VPN ga ### Can I use software VPNs to connect to Azure? -We support Windows Server 2012 Routing and Remote Access (RRAS) servers for Site-to-Site cross-premises configuration. 
+We support Windows Server 2012 Routing and Remote Access (RRAS) servers for site-to-site cross-premises configuration. Other software VPN solutions should work with our gateway as long as they conform to industry standard IPsec implementations. Contact the vendor of the software for configuration and support instructions. -### Can I connect to a VPN gateway via Point-to-Site when located at a Site that has an active Site-to-Site connection? +### Can I connect to a VPN gateway via point-to-site when located at a site that has an active site-to-site connection? -Yes, but the Public IP address(es) of the Point-to-Site client need to be different than the Public IP address(es) used by the Site-to-Site VPN device, or else the Point-to-Site connection will not work. Point-to-Site connections with IKEv2 cannot be initiated from the same Public IP address(es) where a Site-to-Site VPN connection is configured on the same Azure VPN gateway. +Yes, but the Public IP address(es) of the point-to-site client need to be different than the Public IP address(es) used by the site-to-site VPN device, or else the point-to-site connection won't work. Point-to-site connections with IKEv2 can't be initiated from the same Public IP address(es) where a site-to-site VPN connection is configured on the same Azure VPN gateway. -## Point-to-Site - Certificate authentication +## Point-to-site - Certificate authentication This section applies to the Resource Manager deployment model. [!INCLUDE [P2S Azure cert](../../includes/vpn-gateway-faq-p2s-azurecert-include.md)] -## Point-to-Site - RADIUS authentication +## Point-to-site - RADIUS authentication This section applies to the Resource Manager deployment model. @@ -213,15 +213,15 @@ If you want to enable routing between your branch connected to ExpressRoute and Yes. See the [BGP](#bgp) section for more information. **Classic deployment model**
    -Transit traffic via Azure VPN gateway is possible using the classic deployment model, but relies on statically defined address spaces in the network configuration file. BGP is not yet supported with Azure Virtual Networks and VPN gateways using the classic deployment model. Without BGP, manually defining transit address spaces is very error prone, and not recommended. +Transit traffic via Azure VPN gateway is possible using the classic deployment model, but relies on statically defined address spaces in the network configuration file. BGP isn't yet supported with Azure Virtual Networks and VPN gateways using the classic deployment model. Without BGP, manually defining transit address spaces is very error prone, and not recommended. ### Does Azure generate the same IPsec/IKE pre-shared key for all my VPN connections for the same virtual network? -No, Azure by default generates different pre-shared keys for different VPN connections. However, you can use the Set VPN Gateway Key REST API or PowerShell cmdlet to set the key value you prefer. The key MUST only contain printable ASCII characters except space, hyphen (-) or tilde (~). +No, Azure by default generates different pre-shared keys for different VPN connections. However, you can use the `Set VPN Gateway Key` REST API or PowerShell cmdlet to set the key value you prefer. The key MUST only contain printable ASCII characters except space, hyphen (-) or tilde (~). -### Do I get more bandwidth with more Site-to-Site VPNs than for a single virtual network? +### Do I get more bandwidth with more site-to-site VPNs than for a single virtual network? -No, all VPN tunnels, including Point-to-Site VPNs, share the same Azure VPN gateway and the available bandwidth. +No, all VPN tunnels, including point-to-site VPNs, share the same Azure VPN gateway and the available bandwidth. ### Can I configure multiple tunnels between my virtual network and my on-premises site using multi-site VPN? @@ -233,15 +233,15 @@ Yes, Azure VPN gateway will honor AS Path prepending to help make routing decisi ### Can I use the RoutingWeight property when creating a new VPN VirtualNetworkGateway connection? -No, such setting is reserved for ExpressRoute gateway connections. If you want to influence routing decisions between multiple connections you need to use AS Path prepending. +No, such setting is reserved for ExpressRoute gateway connections. If you want to influence routing decisions between multiple connections, you need to use AS Path prepending. -### Can I use Point-to-Site VPNs with my virtual network with multiple VPN tunnels? +### Can I use point-to-site VPNs with my virtual network with multiple VPN tunnels? -Yes, Point-to-Site (P2S) VPNs can be used with the VPN gateways connecting to multiple on-premises sites and other virtual networks. +Yes, point-to-site (P2S) VPNs can be used with the VPN gateways connecting to multiple on-premises sites and other virtual networks. ### Can I connect a virtual network with IPsec VPNs to my ExpressRoute circuit? -Yes, this is supported. For more information, see [Configure ExpressRoute and Site-to-Site VPN connections that coexist](../expressroute/expressroute-howto-coexist-classic.md). +Yes, this is supported. For more information, see [Configure ExpressRoute and site-to-site VPN connections that coexist](../expressroute/expressroute-howto-coexist-classic.md). ## IPsec/IKE policy @@ -265,7 +265,7 @@ Yes. See [Configure forced tunneling](vpn-gateway-about-forced-tunneling.md). You have a few options. 
If you have RDP enabled for your VM, you can connect to your virtual machine by using the private IP address. In that case, you would specify the private IP address and the port that you want to connect to (typically 3389). You'll need to configure the port on your virtual machine for the traffic. -You can also connect to your virtual machine by private IP address from another virtual machine that's located on the same virtual network. You can't RDP to your virtual machine by using the private IP address if you are connecting from a location outside of your virtual network. For example, if you have a Point-to-Site virtual network configured and you don't establish a connection from your computer, you can't connect to the virtual machine by private IP address. +You can also connect to your virtual machine by private IP address from another virtual machine that's located on the same virtual network. You can't RDP to your virtual machine by using the private IP address if you're connecting from a location outside of your virtual network. For example, if you have a point-to-site virtual network configured and you don't establish a connection from your computer, you can't connect to the virtual machine by private IP address. ### If my virtual machine is in a virtual network with cross-premises connectivity, does all the traffic from my VM go through that connection? diff --git a/articles/web-application-firewall/afds/waf-front-door-monitor.md b/articles/web-application-firewall/afds/waf-front-door-monitor.md index 34cd149c1fb1..a43a4182f88e 100644 --- a/articles/web-application-firewall/afds/waf-front-door-monitor.md +++ b/articles/web-application-firewall/afds/waf-front-door-monitor.md @@ -16,7 +16,7 @@ Azure Web Application Firewall (WAF) monitoring and logging are provided through ## Azure Monitor -WAF with FrontDoor log is integrated with [Azure Monitor](../../azure-monitor/overview.md). Azure Monitor allows you to track diagnostic information including WAF alerts and logs. You can configure WAF monitoring within the Front Door resource in the portal under the **Diagnostics** tab or through the Azure Monitor service directly. +Front Door's WAF log is integrated with [Azure Monitor](../../azure-monitor/overview.md). Azure Monitor enables you to track diagnostic information including WAF alerts and logs. You can configure WAF monitoring within the Front Door resource in the portal under the **Diagnostics** tab, through infrastructure as code approaches, or by using the Azure Monitor service directly. From Azure portal, go to Front Door resource type. From **Monitoring**/**Metrics** tab on the left, you can add **WebApplicationFirewallRequestCount** to track number of requests that match WAF rules. Custom filters can be created based on action types and rule names. @@ -24,130 +24,157 @@ From Azure portal, go to Front Door resource type. From **Monitoring**/**Metrics ## Logs and diagnostics -WAF with Front Door provides detailed reporting on each threat it detects. Logging is integrated with Azure Diagnostics logs and alerts are recorded in a json format. These logs can be integrated with [Azure Monitor logs](../../azure-monitor/insights/azure-networking-analytics.md). +WAF with Front Door provides detailed reporting on each request, and each threat that it detects. Logging is integrated with Azure's diagnostics logs and alerts. These logs can be integrated with [Azure Monitor logs](../../azure-monitor/insights/azure-networking-analytics.md). 
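Once the WAF log is flowing to a Log Analytics workspace, you can aggregate it with a short Kusto query. The following is a minimal sketch only; the `ruleName_s` and `action_s` column names are assumptions based on the `AzureDiagnostics` naming used by the queries later in this article:

```kusto
// Minimal sketch: count WAF log entries by rule and action over the last day.
// Column names follow AzureDiagnostics conventions (string properties get an _s suffix)
// and may differ in your workspace.
AzureDiagnostics
| where Category contains "WebApplicationFirewallLog"
| where TimeGenerated > ago(1d)
| summarize Requests = count() by ruleName_s, action_s
| order by Requests desc
```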
![WAFDiag](../media/waf-frontdoor-monitor/waf-frontdoor-diagnostics.png) -[FrontDoorAccessLog](../../frontdoor/standard-premium/how-to-logs.md#access-log) logs all requests. `FrontDoorWebApplicationFirewalllog` logs any request that matches a WAF rule and each log entry has the following schema. +Front Door provides two types of logs: access logs and WAF logs. -For logging on the classic tier, use [FrontdoorAccessLog](../../frontdoor/front-door-diagnostics.md) logs for Front Door requests and `FrontdoorWebApplicationFirewallLog` logs for matched WAF rules using the following schema: +### Access logs -| Property | Description | -| ------------- | ------------- | -|Action|Action taken on the request. WAF log shows all action values. WAF metrics show all action values, except *Log*.| -| ClientIp | The IP address of the client that made the request. If there was an X-Forwarded-For header in the request, then the Client IP is picked from the header field. | -| ClientPort | The IP port of the client that made the request. | -| Details|Additional details on the matched request | -|| matchVariableName: http parameter name of the request matched, for example, header names (max chars 100)| -|| matchVariableValue: values that triggered the match (max chars 100)| -| Host | The host header of the matched request | -| Policy | The name of the WAF policy that the request matched. | -| PolicyMode | Operations mode of the WAF policy. Possible values are "Prevention" and "Detection" | -| RequestUri | Full URI of the matched request. | -| RuleName | The name of the WAF rule that the request matched. | -| SocketIp | The source IP address seen by WAF. This IP address is based on TCP session, independent of any request headers.| -| TrackingReference | The unique reference string that identifies a request served by Front Door, also sent as X-Azure-Ref header to the client. Required for searching details in the access logs for a specific request. | +::: zone pivot="front-door-standard-premium" + +The **FrontDoorAccessLog** includes all requests that go through Front Door. For more information on the Front Door access log, including the log schema, see [Azure Front Door logs](../../frontdoor/standard-premium/how-to-logs.md#access-log). -The following query example returns WAF logs on blocked requests: +::: zone-end ::: zone pivot="front-door-classic" -``` WAFlogQuery + +The **FrontdoorAccessLog** includes all requests that go through Front Door. For more information on the Front Door access log, including the log schema, see [Monitoring metrics and logs in Azure Front Door (classic)](../../frontdoor/front-door-diagnostics.md). 
+ +::: zone-end + +The following example query returns the access log entries: + +::: zone pivot="front-door-standard-premium" + +```kusto AzureDiagnostics -| where ResourceType == "FRONTDOORS" and Category == "FrontdoorWebApplicationFirewallLog" -| where action_s == "Block" +| where ResourceProvider == "MICROSOFT.CDN" and Category == "FrontDoorAccessLog" ``` ::: zone-end -::: zone pivot="front-door-standard-premium" -``` WAFlogQuery -AzureDiagnostics -| where ResourceProvider == "MICROSOFT.CDN" and Category == "FrontDoorWebApplicationFirewallLog" -| where action_s == "Block" +::: zone pivot="front-door-classic" +```kusto +AzureDiagnostics +| where ResourceType == "FRONTDOORS" and Category == "FrontdoorAccessLog" ``` + ::: zone-end -Here is an example of a logged request in WAF log: +The following shows an example log entry: -``` WAFlogQuerySample +```json { - "time": "2020-06-09T22:32:17.8376810Z", - "category": "FrontdoorWebApplicationFirewallLog", - "operationName": "Microsoft.Network/FrontDoorWebApplicationFirewallLog/Write", - "properties": - { - "clientIP":"xxx.xxx.xxx.xxx", - "clientPort":"52097", - "socketIP":"xxx.xxx.xxx.xxx", - "requestUri":"https://wafdemofrontdoorwebapp.azurefd.net:443/?q=%27%20or%201=1", - "ruleName":"Microsoft_DefaultRuleSet-1.1-SQLI-942100", - "policy":"WafDemoCustomPolicy", - "action":"Block", - "host":"wafdemofrontdoorwebapp.azurefd.net", - "trackingReference":"08Q3gXgAAAAAe0s71BET/QYwmqtpHO7uAU0pDRURHRTA1MDgANjMxNTAwZDAtOTRiNS00YzIwLTljY2YtNjFhNzMyOWQyYTgy", - "policyMode":"prevention", - "details": - { - "matches": - [{ - "matchVariableName":"QueryParamValue:q", - "matchVariableValue":"' or 1=1" - }] - } - } + "time": "2020-06-09T22:32:17.8383427Z", + "category": "FrontdoorAccessLog", + "operationName": "Microsoft.Network/FrontDoor/AccessLog/Write", + "properties": { + "trackingReference": "08Q3gXgAAAAAe0s71BET/QYwmqtpHO7uAU0pDRURHRTA1MDgANjMxNTAwZDAtOTRiNS00YzIwLTljY2YtNjFhNzMyOWQyYTgy", + "httpMethod": "GET", + "httpVersion": "2.0", + "requestUri": "https://wafdemofrontdoorwebapp.azurefd.net:443/?q=%27%20or%201=1", + "requestBytes": "715", + "responseBytes": "380", + "userAgent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/85.0.4157.0 Safari/537.36 Edg/85.0.531.1", + "clientIp": "xxx.xxx.xxx.xxx", + "socketIp": "xxx.xxx.xxx.xxx", + "clientPort": "52097", + "timeTaken": "0.003", + "securityProtocol": "TLS 1.2", + "routingRuleName": "WAFdemoWebAppRouting", + "rulesEngineMatchNames": [], + "backendHostname": "wafdemowebappuscentral.azurewebsites.net:443", + "sentToOriginShield": false, + "httpStatusCode": "403", + "httpStatusDetails": "403", + "pop": "SJC", + "cacheStatus": "CONFIG_NOCACHE" + } } ``` -The following example query returns AccessLogs entries: +### WAF logs + +::: zone pivot="front-door-standard-premium" + +The **FrontDoorWebApplicationFirewallLog** includes requests that match a WAF rule. + +::: zone-end ::: zone pivot="front-door-classic" -``` AccessLogQuery -AzureDiagnostics -| where ResourceType == "FRONTDOORS" and Category == "FrontdoorAccessLog" +The **FrontdoorWebApplicationFirewallLog** includes any request that matches a WAF rule. + +::: zone-end + +The following table shows the values logged for each request: + +| Property | Description | +| ------------- | ------------- | +| Action |Action taken on the request. Logs include requests with all actions. Metrics include requests with all actions except *Log*.| +| ClientIp | The IP address of the client that made the request. 
If there was an `X-Forwarded-For` header in the request, the client IP address is taken from that header field instead. | +| ClientPort | The IP port of the client that made the request. | +| Details | Additional details on the request, including any threats that were detected.
    matchVariableName: The HTTP parameter name that was matched in the request, for example, a header name (100 characters maximum).
    matchVariableValue: Values that triggered the match (up to 100 characters maximum). | +| Host | The `Host` header of the request. | +| Policy | The name of the WAF policy that processed the request. | +| PolicyMode | Operations mode of the WAF policy. Possible values are `Prevention` and `Detection`. | +| RequestUri | Full URI of the request. | +| RuleName | The name of the WAF rule that the request matched. | +| SocketIp | The source IP address seen by WAF. This IP address is based on the TCP session, and does not consider any request headers. | +| TrackingReference | The unique reference string that identifies a request served by Front Door. This value is sent to the client in the `X-Azure-Ref` response header. Use this field when searching for a specific request in the log. | + +The following example query shows the requests that were blocked by the Front Door WAF: + +::: zone pivot="front-door-classic" + +```kusto +AzureDiagnostics +| where ResourceType == "FRONTDOORS" and Category == "FrontdoorWebApplicationFirewallLog" +| where action_s == "Block" ``` + ::: zone-end ::: zone pivot="front-door-standard-premium" -``` AccessLogQuery -AzureDiagnostics -| where ResourceProvider == "MICROSOFT.CDN" and Category == "FrontDoorAccessLog" +```kusto +AzureDiagnostics +| where ResourceProvider == "MICROSOFT.CDN" and Category == "FrontDoorWebApplicationFirewallLog" +| where action_s == "Block" ``` + ::: zone-end -Here is an example of a logged request in Access log: +The following shows an example log entry, including the reason that the request was blocked: -``` AccessLogSample +```json { -"time": "2020-06-09T22:32:17.8383427Z", -"category": "FrontdoorAccessLog", -"operationName": "Microsoft.Network/FrontDoor/AccessLog/Write", - "properties": - { - "trackingReference":"08Q3gXgAAAAAe0s71BET/QYwmqtpHO7uAU0pDRURHRTA1MDgANjMxNTAwZDAtOTRiNS00YzIwLTljY2YtNjFhNzMyOWQyYTgy", - "httpMethod":"GET", - "httpVersion":"2.0", - "requestUri":"https://wafdemofrontdoorwebapp.azurefd.net:443/?q=%27%20or%201=1", - "requestBytes":"715", - "responseBytes":"380", - "userAgent":"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/85.0.4157.0 Safari/537.36 Edg/85.0.531.1", - "clientIp":"xxx.xxx.xxx.xxx", - "socketIp":"xxx.xxx.xxx.xxx", - "clientPort":"52097", - "timeTaken":"0.003", - "securityProtocol":"TLS 1.2", - "routingRuleName":"WAFdemoWebAppRouting", - "rulesEngineMatchNames":[], - "backendHostname":"wafdemowebappuscentral.azurewebsites.net:443", - "sentToOriginShield":false, - "httpStatusCode":"403", - "httpStatusDetails":"403", - "pop":"SJC", - "cacheStatus":"CONFIG_NOCACHE" + "time": "2020-06-09T22:32:17.8376810Z", + "category": "FrontdoorWebApplicationFirewallLog", + "operationName": "Microsoft.Network/FrontDoorWebApplicationFirewallLog/Write", + "properties": { + "clientIP": "xxx.xxx.xxx.xxx", + "clientPort": "52097", + "socketIP": "xxx.xxx.xxx.xxx", + "requestUri": "https://wafdemofrontdoorwebapp.azurefd.net:443/?q=%27%20or%201=1", + "ruleName": "Microsoft_DefaultRuleSet-1.1-SQLI-942100", + "policy": "WafDemoCustomPolicy", + "action": "Block", + "host": "wafdemofrontdoorwebapp.azurefd.net", + "trackingReference": "08Q3gXgAAAAAe0s71BET/QYwmqtpHO7uAU0pDRURHRTA1MDgANjMxNTAwZDAtOTRiNS00YzIwLTljY2YtNjFhNzMyOWQyYTgy", + "policyMode": "prevention", + "details": { + "matches": [ + { + "matchVariableName": "QueryParamValue:q", + "matchVariableValue": "' or 1=1" + } + ] } + } } - ``` ## Next steps diff --git a/articles/web-application-firewall/cdn/cdn-overview.md 
b/articles/web-application-firewall/cdn/cdn-overview.md index 942f34afd6fa..f1c76c5a8c7d 100644 --- a/articles/web-application-firewall/cdn/cdn-overview.md +++ b/articles/web-application-firewall/cdn/cdn-overview.md @@ -5,7 +5,7 @@ services: web-application-firewall author: vhorne ms.service: web-application-firewall ms.topic: conceptual -ms.date: 08/31/2020 +ms.date: 05/26/2022 ms.author: victorh --- @@ -28,7 +28,7 @@ You can configure a WAF policy and associate that policy to one or more CDN endp - custom rules that you can create. -- managed rule sets that are a collection of Azure managed pre-configured rules. +- managed rule sets that are a collection of Azure-managed pre-configured rules. When both are present, custom rules are processed before processing the rules in a managed rule set. A rule is made of a match condition, a priority, and an action. Action types supported are: *ALLOW*, *BLOCK*, *LOG*, and *REDIRECT*. You can create a fully customized policy that meets your specific application protection requirements by combining managed and custom rules. @@ -51,7 +51,7 @@ You can choose one of the following actions when a request matches a rule's cond - *Allow*: The request passes through the WAF and is forwarded to back-end. No further lower priority rules can block this request. - *Block*: The request is blocked and WAF sends a response to the client without forwarding the request to the back-end. - *Log*: The request is logged in the WAF logs and WAF continues evaluating lower priority rules. -- *Redirect*: WAF redirects the request to the specified URI. The URI specified is a policy level setting. Once configured, all requests that match the *Redirect* action is sent to that URI. +- *Redirect*: WAF redirects the request to the specified URI. The URI specified is a policy level setting. Once configured, all requests that match the *Redirect* action are sent to that URI. ## WAF rules @@ -66,7 +66,7 @@ Custom rules can have match rules and rate control rules. You can configure the following custom match rules: -- *IP allow list and block list*: You can control access to your web applications based on a list of client IP addresses or IP address ranges. Both IPv4 and IPv6 address types are supported. This list can be configured to either block or allow those requests where the source IP matches an IP in the list. +- *IP allowlist and blocklist*: You can control access to your web applications based on a list of client IP addresses or IP address ranges. Both IPv4 and IPv6 address types are supported. This list can be configured to either block or allow those requests where the source IP matches an IP in the list. - *Geographic based access control*: You can control access to your web applications based on the country code that's associated with a client's IP address. @@ -78,7 +78,7 @@ You can configure the following custom match rules: A rate control rule limits abnormally high traffic from any client IP address. -- *Rate limiting rules*: You can configure a threshold on the number of web requests allowed from a client IP address during a one-minute duration. This rule is distinct from an IP list-based allow/block custom rule that either allows all or blocks all request from a client IP address. Rate limits can be combined with additional match conditions such as HTTP(S) parameter matches for granular rate control. +- *Rate limiting rules*: You can configure a threshold on the number of web requests allowed from a client IP address during a one-minute duration. 
This rule is distinct from an IP list-based allow/block custom rule that either allows all or blocks all request from a client IP address. Rate limits can be combined with more match conditions such as HTTP(S) parameter matches for granular rate control. ### Azure-managed rule sets @@ -109,4 +109,4 @@ Monitoring for WAF with CDN is integrated with Azure Monitor to track alerts and ## Next steps -- [Tutorial: Create a WAF policy with Azure CDN using the Azure portal](waf-cdn-create-portal.md) +- [Azure CLI for CDN WAF](/cli/azure/cdn/waf) diff --git a/articles/web-application-firewall/cdn/waf-cdn-create-portal.md b/articles/web-application-firewall/cdn/waf-cdn-create-portal.md deleted file mode 100644 index 0857f5a1f545..000000000000 --- a/articles/web-application-firewall/cdn/waf-cdn-create-portal.md +++ /dev/null @@ -1,90 +0,0 @@ ---- -title: 'Tutorial: Create WAF policy for Azure CDN - Azure portal' -description: In this tutorial, you learn how to create a Web Application Firewall (WAF) policy on Azure CDN using the Azure portal. -author: vhorne -ms.service: web-application-firewall -services: web-application-firewall -ms.topic: tutorial -ms.date: 09/16/2020 -ms.author: victorh ---- - -# Tutorial: Create a WAF policy on Azure CDN using the Azure portal - -This tutorial shows you how to create a basic Azure Web Application Firewall (WAF) policy and apply it to an endpoint on Azure Content Delivery Network (CDN). - -In this tutorial, you learn how to: - -> [!div class="checklist"] -> * Create a WAF policy -> * Associate it with a CDN endpoint. You can associate a WAF policy only with endpoints that are hosted on the **Azure CDN Standard from Microsoft** SKU. -> * Configure WAF rules - -## Prerequisites - -Create an Azure CDN profile and endpoint by following the instructions in [Quickstart: Create an Azure CDN profile and endpoint](../../cdn/cdn-create-new-endpoint.md). - -## Create a Web Application Firewall policy - -First, create a basic WAF policy with a managed Default Rule Set (DRS) using the portal. - -1. On the top left-hand side of the screen, select **Create a resource**>search for **WAF**>select **Web application firewall** > select **Create**. -2. In the **Basics** tab of the **Create a WAF policy** page, enter or select the following information, accept the defaults for the remaining settings, and then select **Review + create**: - - | Setting | Value | - | --- | --- | - | Policy For |Select Azure CDN (Preview).| - | Subscription |Select your CDN Profile subscription name.| - | Resource group |Select your CDN Profile resource group name.| - | Policy name |Enter a unique name for your WAF policy.| - - :::image type="content" source="../media/waf-cdn-create-portal/basic.png" alt-text="Screenshot of the Create a W A F policy page, with a Review + create button and values entered for various settings." border="false"::: - -3. In the **Association** tab of the **Create a WAF policy** page, select **Add CDN Endpoint**, enter the following settings, and then select **Add**: - - | Setting | Value | - | --- | --- | - | CDN Profile | Select your CDN profile name.| - | Endpoint | Select the name of your endpoint, then select **Add**.| - - > [!NOTE] - > If the endpoint is associated with a WAF policy, it is shown grayed out. You must first remove the Endpoint from the associated policy, and then re-associate the endpoint to a new WAF policy. -1. Select **Review + create**, then select **Create**. 
- -## Configure Web Application Firewall policy (optional) - -### Change mode - -By default WAF policy is in *Detection* mode when you create a WAF policy. In *Detection* mode, WAF doesn't block any requests. Instead, requests matching the WAF rules are logged at WAF logs. - -To see WAF in action, you can change the mode settings from *Detection* to *Prevention*. In *Prevention* mode, requests that match rules that are defined in Default Rule Set (DRS) are blocked and logged at WAF logs. - - :::image type="content" source="../media/waf-cdn-create-portal/policy.png" alt-text="Screenshot of the Policy settings section. The Mode toggle is set to Prevention." border="false"::: - -### Custom rules - -To create a custom rule, select **Add custom rule** under the **Custom rules** section. This opens the custom rule configuration page. There are two types of custom rules: **match rule** and **rate limit** rule. - -The following screenshot shows a custom match rule to block a request if the query string contains the value **blockme**. - -:::image type="content" source="../media/waf-cdn-create-portal/custommatch.png" alt-text="Screenshot of the custom rule configuration page showing settings for a rule that checks whether the QueryString variable contains the value blockme." border="false"::: - -Rate limit rules require two additional fields: **Rate limit duration** and **Rate limit threshold (requests)** as shown in the following example: - -:::image type="content" source="../media/waf-cdn-create-portal/customrate.png" alt-text="Screenshot of the rate limit rule configuration page. A Rate limit duration list box and a Rate limit threshold (requests) box are visible." border="false"::: - -### Default Rule Set (DRS) - -The Azure managed Default Rule Set is enabled by default. To disable an individual rule within a rule group, expand the rules within that rule group, select the check box in front of the rule number, and select **Disable** on the tab above. To change actions types for individual rules within the rule set, select the check box in front of the rule number, and then select the **Change action** tab above. - - :::image type="content" source="../media/waf-cdn-create-portal/managed2.png" alt-text="Screenshot of the Managed rules page showing a rule set, rule groups, rules, and Enable, Disable, and Change Action buttons. One rule is checked." border="false"::: - -## Clean up resources - -When no longer needed, remove the resource group and all related resources. 
- - -## Next steps - -> [!div class="nextstepaction"] -> [Learn about Azure Web Application Firewall](../overview.md) diff --git a/articles/web-application-firewall/index.yml b/articles/web-application-firewall/index.yml index 873060806cb9..a7500979dac2 100644 --- a/articles/web-application-firewall/index.yml +++ b/articles/web-application-firewall/index.yml @@ -5,13 +5,13 @@ summary: Web Application Firewall (WAF) provides centralized protection of your metadata: title: Web Application Firewall documentation - description: Learn about Web Application Firewall that can deployed with Application Gateway and Front Door + description: Learn about Web Application Firewall that can be deployed with Application Gateway and Front Door services: web-application-firewall ms.service: web-application-firewall ms.topic: landing-page author: vhorne ms.author: victorh - ms.date: 06/30/2021 + ms.date: 05/26/2022 # linkListType: architecture | concept | deploy | download | get-started | how-to-guide | learn | overview | quickstart | reference | tutorial | whats-new @@ -54,8 +54,6 @@ landingContent: url: ./ag/application-gateway-web-application-firewall-portal.md - text: Web Application Firewall on Front Door url: ./afds/waf-front-door-create-portal.md - - text: WAF policy on Azure CDN - url: ./cdn/waf-cdn-create-portal.md # Card - title: Manage WAF rules on Application Gateway diff --git a/articles/web-application-firewall/toc.yml b/articles/web-application-firewall/toc.yml index d70e0915e161..67ca35bb79a0 100644 --- a/articles/web-application-firewall/toc.yml +++ b/articles/web-application-firewall/toc.yml @@ -37,10 +37,6 @@ items: - name: Configure WAF policy - portal href: ./afds/waf-front-door-create-portal.md - - name: Content Delivery Network - items: - - name: Configure WAF policy - portal - href: ./cdn/waf-cdn-create-portal.md - name: Samples items: - name: Azure PowerShell diff --git a/docfx.json b/docfx.json index cac6d1af86aa..aa930a1d257f 100644 --- a/docfx.json +++ b/docfx.json @@ -253,7 +253,6 @@ "articles/azure-resource-manager/templates/**/*.md": "mumian", "articles/azure-resource-manager/bicep/**/*.md": "mumian", "articles/azure-resource-manager/troubleshooting/**/*.md": "davidsmatlak", - "articles/partner-solutions/**/*.md": "davidsmatlak", "articles/azure-vmware/**/*.md": "suzizuber", "articles/backup/**/*.md": "v-amallick", "articles/backup/**/*.yml": "v-amallick", @@ -616,7 +615,6 @@ "articles/azure-resource-manager/templates/**/*.md": "jgao", "articles/azure-resource-manager/bicep/**/*.md": "jgao", "articles/azure-resource-manager/troubleshooting/**/*.md": "davidsmatlak", - "articles/partner-solutions/**/*.md": "davidsmatlak", "articles/azure-vmware/**/*.md": "v-szuber", "articles/backup/**/*.md": "v-amallick", "articles/backup/**/*.yml": "v-amallick", diff --git a/includes/active-directory-app-provisioning-ldap.md b/includes/active-directory-app-provisioning-ldap.md index ced97879f6fe..a5fe9345323f 100644 --- a/includes/active-directory-app-provisioning-ldap.md +++ b/includes/active-directory-app-provisioning-ldap.md @@ -7,7 +7,7 @@ For important details on what this service does, how it works, and frequently as ### On-premises prerequisites - A target directory, such as Active Directory Lightweight Services (AD LDS), in which users can be created, updated, and deleted. This directory instance should not be a directory that is also used to provision users into Azure AD, because having both scenarios may create a loop with Azure AD Connect. 
- - A computer with at least 3 GB of RAM, to host a provisioning agent. The computer should have Windows Server 2016 or a later version of Windows Server, with connectivity to the target directory, and with outbound connectivity to login.microsoftonline.com, [other Microsoft Online Services](/microsoft-365/enterprise/urls-and-ip-address-ranges?view=o365-worldwide) and [Azure](/azure/azure-portal/azure-portal-safelist-urls?tabs=public-cloud) domains. An example is a Windows Server 2016 virtual machine hosted in Azure IaaS or behind a proxy. + - A computer with at least 3 GB of RAM, to host a provisioning agent. The computer should have Windows Server 2016 or a later version of Windows Server, with connectivity to the target directory, and with outbound connectivity to login.microsoftonline.com, [other Microsoft Online Services](/microsoft-365/enterprise/urls-and-ip-address-ranges?view=o365-worldwide) and [Azure](../articles/azure-portal/azure-portal-safelist-urls.md?tabs=public-cloud) domains. An example is a Windows Server 2016 virtual machine hosted in Azure IaaS or behind a proxy. - The .NET Framework 4.7.2 needs to be installed. - Optional: Although it is not required, it is recommended to download [Microsoft Edge for Windows Server](https://www.microsoft.com/en-us/edge?r=1) and use it in-place of Internet Explorer. diff --git a/includes/active-directory-app-provisioning-sql.md b/includes/active-directory-app-provisioning-sql.md index 5878b0f3c24f..f412e31d336a 100644 --- a/includes/active-directory-app-provisioning-sql.md +++ b/includes/active-directory-app-provisioning-sql.md @@ -10,7 +10,7 @@ For important details on what this service does, how it works, and frequently as ### On-premises prerequisites - The application relies upon a SQL database, in which records for users can be created, updated, and deleted. - - A computer with at least 3 GB of RAM, to host a provisioning agent. The computer should have Windows Server 2016 or a later version of Windows Server, with connectivity to the target database system, and with outbound connectivity to login.microsoftonline.com, [other Microsoft Online Services](/microsoft-365/enterprise/urls-and-ip-address-ranges?view=o365-worldwide) and [Azure](/azure/azure-portal/azure-portal-safelist-urls?tabs=public-cloud) domains. An example is a Windows Server 2016 virtual machine hosted in Azure IaaS or behind a proxy. + - A computer with at least 3 GB of RAM, to host a provisioning agent. The computer should have Windows Server 2016 or a later version of Windows Server, with connectivity to the target database system, and with outbound connectivity to login.microsoftonline.com, [other Microsoft Online Services](/microsoft-365/enterprise/urls-and-ip-address-ranges?view=o365-worldwide) and [Azure](../articles/azure-portal/azure-portal-safelist-urls.md?tabs=public-cloud) domains. An example is a Windows Server 2016 virtual machine hosted in Azure IaaS or behind a proxy. - The computer should have .NET Framework 4.7.2 and an ODBC driver for the SQL database. Configuration of the connection to the application's database is done via a wizard. Depending on the options you select, some of the wizard screens might not be available and the information might be slightly different. Use the following information to guide you in your configuration. 
@@ -388,4 +388,4 @@ GO ALTER TABLE [dbo].[Employees] ADD CONSTRAINT [DF_Employees_InternalGUID] DEFAULT (newid()) FOR [InternalGUID] GO -``` +``` \ No newline at end of file diff --git a/includes/api-management-portal-legacy.md b/includes/api-management-portal-legacy.md index 9cce2ff9af2c..461e77a02cdf 100644 --- a/includes/api-management-portal-legacy.md +++ b/includes/api-management-portal-legacy.md @@ -7,7 +7,7 @@ ms.author: danlep --- > [!NOTE] -> This documentation content is about the deprecated developer portal. You can continue to use it, as per usual, until its retirement in October 2023, when it will be removed from all API Management services. The deprecated portal will only receive critical security updates. Refer to the following articles for more details: +> The following documentation content is about the deprecated developer portal. You can continue to use it, as per usual, until its retirement in October 2023, when it will be removed from all API Management services. The deprecated portal will only receive critical security updates. Refer to the following articles for more details: > > - [Learn how to migrate to the new developer portal](../articles/api-management/developer-portal-deprecated-migration.md) > - [Azure API Management new developer portal overview](../articles/api-management/api-management-howto-developer-portal.md) diff --git a/includes/digital-twins-create-app-registration-selector.md b/includes/digital-twins-create-app-registration-selector.md deleted file mode 100644 index 32998472c917..000000000000 --- a/includes/digital-twins-create-app-registration-selector.md +++ /dev/null @@ -1,15 +0,0 @@ ---- - title: include file - description: include file for selecting between versions of Azure Digital Twins app registration article - services: digital-twins - author: baanders - ms.service: digital-twins - ms.topic: include - ms.date: 05/13/2021 - ms.author: baanders - ms.custom: include file ---- - -> [!div class="op_single_selector"] -> * [Portal](../articles/digital-twins/how-to-create-app-registration-portal.md) -> * [CLI](../articles/digital-twins/how-to-create-app-registration-cli.md) \ No newline at end of file diff --git a/includes/digital-twins-prereq-registration.md b/includes/digital-twins-prereq-registration.md index c94480a61bcf..c09d048298dc 100644 --- a/includes/digital-twins-prereq-registration.md +++ b/includes/digital-twins-prereq-registration.md @@ -7,6 +7,6 @@ ms.date: 10/29/2020 ms.author: baanders --- -To authenticate all the resources used in this article, you'll need to set up an [Azure Active Directory (Azure AD)](../articles/active-directory/fundamentals/active-directory-whatis.md) app registration. Follow the instructions in [Create an app registration with Azure Digital Twins access](../articles/digital-twins/how-to-create-app-registration-portal.md) to set this up. +To authenticate all the resources used in this article, you'll need to set up an [Azure Active Directory (Azure AD)](../articles/active-directory/fundamentals/active-directory-whatis.md) app registration. Follow the instructions in [Create an app registration with Azure Digital Twins access](../articles/digital-twins/how-to-create-app-registration.md) to set this up. -Once you have an app registration, you'll need the registration's **Application (client) ID**, **Directory (tenant) ID**, and **client secret value** ([find in the Azure portal](../articles/digital-twins/how-to-create-app-registration-portal.md#collect-important-values)). 
Take note of these values to use them later to grant access to the Azure Digital Twins APIs. \ No newline at end of file +Once you have an app registration, you'll need the registration's **Application (client) ID**, **Directory (tenant) ID**, and **client secret value** ([find in the Azure portal](../articles/digital-twins/how-to-create-app-registration.md?tabs=portal#collect-important-values)). Take note of these values to use them later to grant access to the Azure Digital Twins APIs. \ No newline at end of file diff --git a/includes/machine-learning-online-endpoint-troubleshooting.md b/includes/machine-learning-online-endpoint-troubleshooting.md index b6fd62fcfda8..eb14d4b9be12 100644 --- a/includes/machine-learning-online-endpoint-troubleshooting.md +++ b/includes/machine-learning-online-endpoint-troubleshooting.md @@ -6,6 +6,15 @@ ms.date: 05/10/2022 ms.author: larryfr --- +### Online endpoint creation fails with a V1LegacyMode == true message + +The Azure Machine Learning workspace can be configured for `v1_legacy_mode`, which disables v2 APIs. Managed online endpoints are a feature of the v2 API platform, and won't work if `v1_legacy_mode` is enabled for the workspace. + +> [!IMPORTANT] +> Check with your network security team before disabling `v1_legacy_mode`. It may have been enabled by your network security team for a reason. + +For information on how to disable `v1_legacy_mode`, see [Network isolation with v2](/azure/machine-learning/how-to-configure-network-isolation-with-v2). + ### Online endpoint creation with key-based authentication fails Use the following command to list the network rules of the Azure Key Vault for your workspace. Replace `` with the name of your key vault: @@ -25,7 +34,7 @@ The response for this command is similar to the following JSON document: } ``` -If the value of `bypass` isn't `AzureServices`, use the guidance in the [Configure key vault network settings](/azure/key-vault/general/how-to-azure-key-vault-network-security?tabs=azure-cli) to set it to `AzureServices`. +If the value of `bypass` isn't `AzureServices`, use the guidance in the [Configure key vault network settings](../articles/key-vault/general/how-to-azure-key-vault-network-security.md?tabs=azure-cli) to set it to `AzureServices`. ### Online deployments fail with an image download error diff --git a/includes/quickstarts-free-trial-note.md b/includes/quickstarts-free-trial-note.md index e8fdd0f95dd6..e98fe8f2dfa0 100644 --- a/includes/quickstarts-free-trial-note.md +++ b/includes/quickstarts-free-trial-note.md @@ -5,4 +5,4 @@ ms.topic: include ms.date: 01/18/2022 ms.author: cfowler --- -If you don't have an [Azure subscription](/azure/guides/developer/azure-developer-guide#understanding-accounts-subscriptions-and-billing), create an [Azure free account](https://azure.microsoft.com/free/?ref=microsoft.com&utm_source=microsoft.com&utm_medium=docs&utm_campaign=visualstudio) before you begin. +If you don't have an [Azure subscription](../articles/guides/developer/azure-developer-guide.md#understanding-accounts-subscriptions-and-billing), create an [Azure free account](https://azure.microsoft.com/free/?ref=microsoft.com&utm_source=microsoft.com&utm_medium=docs&utm_campaign=visualstudio) before you begin. 
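For the key vault `bypass` check described in the online endpoint troubleshooting note above, the setting can also be inspected and updated with Azure PowerShell instead of the Azure CLI. The following is a minimal sketch, assuming the Az.KeyVault module is installed and that `contoso-kv` stands in for your own key vault name:

```powershell
# Inspect the current network rule set for the workspace key vault (vault name is illustrative).
Get-AzKeyVault -VaultName "contoso-kv" | Select-Object -ExpandProperty NetworkAcls

# Allow trusted Azure services to bypass the key vault firewall.
Update-AzKeyVaultNetworkRuleSet -VaultName "contoso-kv" -Bypass AzureServices
```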
\ No newline at end of file diff --git a/includes/storage-files-redundancy-premium-zrs.md b/includes/storage-files-redundancy-premium-zrs.md index 4c955fb65e0a..c7aa8cd22a90 100644 --- a/includes/storage-files-redundancy-premium-zrs.md +++ b/includes/storage-files-redundancy-premium-zrs.md @@ -18,4 +18,5 @@ ZRS for premium file shares is available for a subset of Azure regions: - (North America) East US - (North America) East US 2 - (North America) West US 2 +- (North America) South Central US - (South America) Brazil South diff --git a/includes/virtual-machines-n-series-windows-support.md b/includes/virtual-machines-n-series-windows-support.md index 091a5bb2118f..ad8362d5ff01 100644 --- a/includes/virtual-machines-n-series-windows-support.md +++ b/includes/virtual-machines-n-series-windows-support.md @@ -22,8 +22,8 @@ NVIDIA Tesla (CUDA) drivers for NC, NCv2, NCv3, NCasT4_v3, ND, and NDv2-series V | OS | Driver | | -------- |------------- | -| Windows Server 2019 | [451.82](http://us.download.nvidia.com/tesla/451.82/451.82-tesla-desktop-winserver-2019-2016-international.exe) (.exe) | -| Windows Server 2016 | [451.82](http://us.download.nvidia.com/tesla/451.82/451.82-tesla-desktop-winserver-2019-2016-international.exe) (.exe) | +| Windows Server 2019 | [451.82](https://us.download.nvidia.com/tesla/451.82/451.82-tesla-desktop-winserver-2019-2016-international.exe) (.exe) | +| Windows Server 2016 | [451.82](https://us.download.nvidia.com/tesla/451.82/451.82-tesla-desktop-winserver-2019-2016-international.exe) (.exe) | ### NVIDIA GRID drivers diff --git a/includes/virtual-wan-create-vwan-include.md b/includes/virtual-wan-create-vwan-include.md index 4c2ac48344ad..88e28070aa2d 100644 --- a/includes/virtual-wan-create-vwan-include.md +++ b/includes/virtual-wan-create-vwan-include.md @@ -1,7 +1,7 @@ --- ms.author: cherylmc author: cherylmc -ms.date: 04/12/2022 +ms.date: 05/25/2022 ms.service: virtual-wan ms.topic: include diff --git a/includes/vpn-gateway-faq-bgp-include.md b/includes/vpn-gateway-faq-bgp-include.md index 5a3a439f9d65..4eea63a1ca71 100644 --- a/includes/vpn-gateway-faq-bgp-include.md +++ b/includes/vpn-gateway-faq-bgp-include.md @@ -1,13 +1,9 @@ --- title: include file - description: include file - services: vpn-gateway author: cherylmc ms.service: vpn-gateway - ms.topic: include - ms.date: 03/22/2021 + ms.date: 05/25/2022 ms.author: cherylmc - ms.custom: include file --- ### Is BGP supported on all Azure VPN Gateway SKUs? @@ -63,7 +59,7 @@ Your on-premises BGP peer address must not be the same as the public IP address ### Can I use the same ASN for both on-premises VPN networks and Azure virtual networks? -No, you must assign different ASNs between your on-premises networks and your Azure virtual networks if you're connecting them together with BGP. Azure VPN gateways have a default ASN of 65515 assigned, whether BGP is enabled or not for your cross-premises connectivity. You can override this default by assigning a different ASN when you're creating the VPN gateway, or you can change the ASN after the gateway is created. You will need to assign your on-premises ASNs to the corresponding Azure local network gateways. +No, you must assign different ASNs between your on-premises networks and your Azure virtual networks if you're connecting them together with BGP. Azure VPN gateways have a default ASN of 65515 assigned, whether BGP is enabled or not for your cross-premises connectivity. 
You can override this default by assigning a different ASN when you're creating the VPN gateway, or you can change the ASN after the gateway is created. You'll need to assign your on-premises ASNs to the corresponding Azure local network gateways. ### What address prefixes will Azure VPN gateways advertise to me? diff --git a/includes/vpn-gateway-faq-ipsecikepolicy-include.md b/includes/vpn-gateway-faq-ipsecikepolicy-include.md index 02c3d747fe5f..64625d7f1f12 100644 --- a/includes/vpn-gateway-faq-ipsecikepolicy-include.md +++ b/includes/vpn-gateway-faq-ipsecikepolicy-include.md @@ -1,13 +1,9 @@ --- title: include file - description: include file - services: vpn-gateway author: cherylmc ms.service: vpn-gateway - ms.topic: include - ms.date: 03/23/2021 + ms.date: 05/25/2022 ms.author: cherylmc - ms.custom: include file --- ### Is Custom IPsec/IKE policy supported on all Azure VPN Gateway SKUs? @@ -19,7 +15,7 @@ You can only specify ***one*** policy combination for a given connection. ### Can I specify a partial policy on a connection? (for example, only IKE algorithms, but not IPsec) -No, you must specify all algorithms and parameters for both IKE (Main Mode) and IPsec (Quick Mode). Partial policy specification is not allowed. +No, you must specify all algorithms and parameters for both IKE (Main Mode) and IPsec (Quick Mode). Partial policy specification isn't allowed. ### What are the algorithms and key strengths supported in the custom policy? @@ -38,7 +34,7 @@ The following table lists the supported cryptographic algorithms and key strengt | | | > [!IMPORTANT] -> * DHGroup2048 & PFS2048 are the same as Diffie-Hellman Group **14** in IKE and IPsec PFS. See [Diffie-Hellman Groups](#DH) for the complete mappings. +> * DHGroup2048 & PFS2048 are the same as Diffie-Hellman Group **14** in IKE and IPsec PFS. See [Diffie-Hellman Groups](#DH) for the complete mappings. > * For GCMAES algorithms, you must specify the same GCMAES algorithm and key length for both IPsec Encryption and Integrity. > * IKEv2 Main Mode SA lifetime is fixed at 28,800 seconds on the Azure VPN gateways. > * QM SA Lifetimes are optional parameters. If none was specified, default values of 27,000 seconds (7.5 hrs) and 102400000 KBytes (102GB) are used. @@ -56,7 +52,7 @@ Your on-premises VPN device configuration must match or contain the following al * PFS Group * Traffic Selector (*) -The SA lifetimes are local specifications only, do not need to match. +The SA lifetimes are local specifications only, don't need to match. If you enable **UsePolicyBasedTrafficSelectors**, you need to ensure your VPN device has the matching traffic selectors defined with all combinations of your on-premises network (local network gateway) prefixes to/from the Azure virtual network prefixes, instead of any-to-any. For example, if your on-premises network prefixes are 10.1.0.0/16 and 10.2.0.0/16, and your virtual network prefixes are 192.168.0.0/16 and 172.16.0.0/16, you need to specify the following traffic selectors: * 10.1.0.0/16 <====> 192.168.0.0/16 @@ -116,24 +112,24 @@ No. IPsec/IKE policy only works on S2S VPN and VNet-to-VNet connections via the ### How do I create connections with IKEv1 or IKEv2 protocol type? -IKEv1 connections can be created on all RouteBased VPN type SKUs, except the Basic SKU, Standard SKU, and other [legacy SKUs](../articles/vpn-gateway/vpn-gateway-about-skus-legacy.md#gwsku). You can specify a connection protocol type of IKEv1 or IKEv2 while creating connections. 
If you do not specify a connection protocol type, IKEv2 is used as default option where applicable. For more information, see the [PowerShell cmdlet](/powershell/module/az.network/new-azvirtualnetworkgatewayconnection) documentation. For SKU types and IKEv1/IKEv2 support, see [Connect gateways to policy-based VPN devices](../articles/vpn-gateway/vpn-gateway-connect-multiple-policybased-rm-ps.md). +IKEv1 connections can be created on all RouteBased VPN type SKUs, except the Basic SKU, Standard SKU, and other [legacy SKUs](../articles/vpn-gateway/vpn-gateway-about-skus-legacy.md#gwsku). You can specify a connection protocol type of IKEv1 or IKEv2 while creating connections. If you don't specify a connection protocol type, IKEv2 is used as default option where applicable. For more information, see the [PowerShell cmdlet](/powershell/module/az.network/new-azvirtualnetworkgatewayconnection) documentation. For SKU types and IKEv1/IKEv2 support, see [Connect gateways to policy-based VPN devices](../articles/vpn-gateway/vpn-gateway-connect-multiple-policybased-rm-ps.md). -### Is transit between between IKEv1 and IKEv2 connections allowed? +### Is transit between IKEv1 and IKEv2 connections allowed? Yes. Transit between IKEv1 and IKEv2 connections is supported. ### Can I have IKEv1 site-to-site connections on Basic SKUs of RouteBased VPN type? -No. The Basic SKU does not support this. +No. The Basic SKU doesn't support this. ### Can I change the connection protocol type after the connection is created (IKEv1 to IKEv2 and vice versa)? -No. Once the connection is created, IKEv1/IKEv2 protocols cannot be changed. You must delete and recreate a new connection with the desired protocol type. +No. Once the connection is created, IKEv1/IKEv2 protocols can't be changed. You must delete and recreate a new connection with the desired protocol type. ### Why is my IKEv1 connection frequently reconnecting? -If your static routing or route based IKEv1 connection is disconnecting at routine intervals, it is likely due to VPN gateways not supporting in-place rekeys. When Main mode is getting rekeyed, your IKEv1 tunnels will disconnect and take up to 5 seconds to reconnect. Your Main mode negotiation time out value will determine the frequency of rekeys. To prevent these reconnects, you can switch to using IKEv2, which supports in-place rekeys. +If your static routing or route based IKEv1 connection is disconnecting at routine intervals, it's likely due to VPN gateways not supporting in-place rekeys. When Main mode is getting rekeyed, your IKEv1 tunnels will disconnect and take up to 5 seconds to reconnect. Your Main mode negotiation time out value will determine the frequency of rekeys. To prevent these reconnects, you can switch to using IKEv2, which supports in-place rekeys. -If your connection is reconnecting at random times, follow our [troubleshooting guide](../articles/vpn-gateway/vpn-gateway-troubleshoot-site-to-site-disconnected-intermittently.md). +If your connection is reconnecting at random times, follow our [troubleshooting guide](../articles/vpn-gateway/vpn-gateway-troubleshoot-site-to-site-disconnected-intermittently.md). ### Where can I find more configuration information for IPsec? 
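As a companion to the algorithm table and matching requirements earlier in this FAQ, a custom IPsec/IKE policy can be expressed with Azure PowerShell. The following is a minimal sketch under stated assumptions: the connection and resource group names are illustrative, and the algorithm combination shown is only one valid choice from the table (for GCMAES, the IPsec encryption and integrity values must match).

```powershell
# Define a custom IPsec/IKE policy (one allowed combination; adjust to match your VPN device).
$ipsecPolicy = New-AzIpsecPolicy -IkeEncryption AES256 -IkeIntegrity SHA256 -DhGroup DHGroup14 `
    -IpsecEncryption GCMAES256 -IpsecIntegrity GCMAES256 -PfsGroup PFS2048 `
    -SALifeTimeSeconds 27000 -SADataSizeKilobytes 102400000

# Apply the policy to an existing site-to-site connection (names are illustrative).
$connection = Get-AzVirtualNetworkGatewayConnection -Name "VNet1toSite1" -ResourceGroupName "TestRG1"
Set-AzVirtualNetworkGatewayConnection -VirtualNetworkGatewayConnection $connection -IpsecPolicies $ipsecPolicy
```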
diff --git a/includes/vpn-gateway-faq-nat-include.md b/includes/vpn-gateway-faq-nat-include.md index 1394d6227146..0313276d88f3 100644 --- a/includes/vpn-gateway-faq-nat-include.md +++ b/includes/vpn-gateway-faq-nat-include.md @@ -19,7 +19,7 @@ You can create up to 100 NAT rules (Ingress and Egress rules combined) on a VPN ### Is NAT applied to all connections on a VPN gateway? -NAT is applied to the connections with NAT rules. If a connection does not have a NAT rule, NAT will not take effect on that connection. On the same VPN gateway, you can have some connections with NAT, and other connections without NAT working together. +NAT is applied to the connections with NAT rules. If a connection doesn't have a NAT rule, NAT won't take effect on that connection. On the same VPN gateway, you can have some connections with NAT, and other connections without NAT working together. ### What types of NAT is supported on Azure VPN gateways? @@ -69,8 +69,8 @@ Yes, you can create multiple EgressSNAT rules for the same VNet address space, a ### Can I use the same IngressSNAT rule on different connections? -Yes, this is typically used when the connections are for the same on-premises network to provide redundancy. You cannot use the same Ingress rule if the connections are for different on-premises networks. +Yes, this is typically used when the connections are for the same on-premises network to provide redundancy. You can't use the same Ingress rule if the connections are for different on-premises networks. ### Do I need both Ingress and Egress rules on a NAT connection? -You need both Ingress and Egress rules on the same connection when the on-premise network address space overlaps with the VNet address space. If the VNet address space is unique among all connected networks, you do not need the EgressSNAT rule on those connections. You can use the Ingress rules to avoid address overlap among the on-premises networks. +You need both Ingress and Egress rules on the same connection when the on-premise network address space overlaps with the VNet address space. If the VNet address space is unique among all connected networks, you don't need the EgressSNAT rule on those connections. You can use the Ingress rules to avoid address overlap among the on-premises networks. diff --git a/includes/vpn-gateway-faq-p2s-all-include.md b/includes/vpn-gateway-faq-p2s-all-include.md index 0887f711e4bc..d4a7f5663063 100644 --- a/includes/vpn-gateway-faq-p2s-all-include.md +++ b/includes/vpn-gateway-faq-p2s-all-include.md @@ -1,19 +1,16 @@ --- - title: include file - description: include file - services: vpn-gateway author: cherylmc ms.service: vpn-gateway ms.topic: include - ms.date: 2/10/2022 + ms.date: 05/25/2022 ms.author: cherylmc - ms.custom: include file, devx-track-azurepowershell + ms.custom: devx-track-azurepowershell --- -### How many VPN client endpoints can I have in my Point-to-Site configuration? +### How many VPN client endpoints can I have in my point-to-site configuration? It depends on the gateway SKU. For more information on the number of connections supported, see [Gateway SKUs](../articles/vpn-gateway/vpn-gateway-about-vpngateways.md#gwsku). -### What client operating systems can I use with Point-to-Site? +### What client operating systems can I use with point-to-site? 
The following client operating systems are supported: @@ -25,14 +22,14 @@ The following client operating systems are supported: * Windows Server 2019 (64-bit only) * Windows Server 2022 (64-bit only) * Windows 10 -* Windows 11 +* Windows 11 * macOS version 10.11 or above * Linux (StrongSwan) * iOS [!INCLUDE [TLS](vpn-gateway-tls-updates.md)] -### Can I traverse proxies and firewalls using Point-to-Site capability? +### Can I traverse proxies and firewalls using point-to-site capability? Azure supports three types of Point-to-site VPN options: @@ -40,45 +37,46 @@ Azure supports three types of Point-to-site VPN options: * OpenVPN. OpenVPN is a SSL-based solution that can penetrate firewalls since most firewalls open the outbound TCP port that 443 SSL uses. -* IKEv2 VPN. IKEv2 VPN is a standards-based IPsec VPN solution that uses outbound UDP ports 500 and 4500 and IP protocol no. 50. Firewalls do not always open these ports, so there is a possibility of IKEv2 VPN not being able to traverse proxies and firewalls. +* IKEv2 VPN. IKEv2 VPN is a standards-based IPsec VPN solution that uses outbound UDP ports 500 and 4500 and IP protocol no. 50. Firewalls don't always open these ports, so there's a possibility of IKEv2 VPN not being able to traverse proxies and firewalls. -### If I restart a client computer configured for Point-to-Site, will the VPN automatically reconnect? +### If I restart a client computer configured for point-to-site, will the VPN automatically reconnect? Auto-reconnect is a function of the client being used. Windows supports auto-reconnect by configuring the **Always On VPN** client feature. -### Does Point-to-Site support DDNS on the VPN clients? +### Does point-to-site support DDNS on the VPN clients? -DDNS is currently not supported in Point-to-Site VPNs. +DDNS is currently not supported in point-to-site VPNs. -### Can I have Site-to-Site and Point-to-Site configurations coexist for the same virtual network? +### Can I have Site-to-Site and point-to-site configurations coexist for the same virtual network? -Yes. For the Resource Manager deployment model, you must have a RouteBased VPN type for your gateway. For the classic deployment model, you need a dynamic gateway. We do not support Point-to-Site for static routing VPN gateways or PolicyBased VPN gateways. +Yes. For the Resource Manager deployment model, you must have a RouteBased VPN type for your gateway. For the classic deployment model, you need a dynamic gateway. We don't support point-to-site for static routing VPN gateways or PolicyBased VPN gateways. -### Can I configure a Point-to-Site client to connect to multiple virtual network gateways at the same time? +### Can I configure a point-to-site client to connect to multiple virtual network gateways at the same time? -Depending on the VPN Client software used, you may be able to connect to multiple Virtual Network Gateways provided the virtual networks being connected to do not have conflicting address spaces between them or the network from with the client is connecting from. While the Azure VPN Client supports many VPN connections, only one connection can be Connected at any given time. +Depending on the VPN Client software used, you may be able to connect to multiple Virtual Network Gateways provided the virtual networks being connected to don't have conflicting address spaces between them or the network from with the client is connecting from. While the Azure VPN Client supports many VPN connections, only one connection can be Connected at any given time. 
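Related to the earlier question about traversing proxies and firewalls: one quick way to see whether a client network allows the SSTP/OpenVPN path is to test outbound TCP 443 from the client. The following is a minimal sketch using the built-in `Test-NetConnection` cmdlet; the gateway host name is a placeholder for your own gateway's public FQDN, and this check can't probe the UDP 500/4500 ports that IKEv2 requires.

```powershell
# Quick client-side check that outbound TCP 443 (used by SSTP and OpenVPN) is reachable.
# Replace the host name with your own gateway's address.
Test-NetConnection -ComputerName "azuregateway-contoso.vpn.azure.com" -Port 443
```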
-### Can I configure a Point-to-Site client to connect to multiple virtual networks at the same time? +### Can I configure a point-to-site client to connect to multiple virtual networks at the same time? -Yes, Point-to-Site client connections to a virtual network gateway that is deployed in a VNet which is peered with other VNets may have access to other peered VNets. Point-to-Site clients will be able to connect to peered VNets as long as the peered VNets are using the UseRemoteGateway / AllowGatewayTransit features. For more information, see [About Point-to-Site routing](../articles/vpn-gateway/vpn-gateway-about-point-to-site-routing.md). +Yes, point-to-site client connections to a virtual network gateway that is deployed in a VNet that is peered with other VNets may have access to other peered VNets. point-to-site clients will be able to connect to peered VNets as long as the peered VNets are using the UseRemoteGateway / AllowGatewayTransit features. For more information, see [About point-to-site routing](../articles/vpn-gateway/vpn-gateway-about-point-to-site-routing.md). -### How much throughput can I expect through Site-to-Site or Point-to-Site connections? +### How much throughput can I expect through Site-to-Site or point-to-site connections? -It's difficult to maintain the exact throughput of the VPN tunnels. IPsec and SSTP are crypto-heavy VPN protocols. Throughput is also limited by the latency and bandwidth between your premises and the Internet. For a VPN Gateway with only IKEv2 Point-to-Site VPN connections, the total throughput that you can expect depends on the Gateway SKU. For more information on throughput, see [Gateway SKUs](../articles/vpn-gateway/vpn-gateway-about-vpngateways.md#gwsku). +It's difficult to maintain the exact throughput of the VPN tunnels. IPsec and SSTP are crypto-heavy VPN protocols. Throughput is also limited by the latency and bandwidth between your premises and the Internet. For a VPN Gateway with only IKEv2 point-to-site VPN connections, the total throughput that you can expect depends on the Gateway SKU. For more information on throughput, see [Gateway SKUs](../articles/vpn-gateway/vpn-gateway-about-vpngateways.md#gwsku). -### Can I use any software VPN client for Point-to-Site that supports SSTP and/or IKEv2? +### Can I use any software VPN client for point-to-site that supports SSTP and/or IKEv2? No. You can only use the native VPN client on Windows for SSTP, and the native VPN client on Mac for IKEv2. However, you can use the OpenVPN client on all platforms to connect over OpenVPN protocol. Refer to the list of [supported client operating systems](#supportedclientos). -### Can I change the authentication type for a Point-to-Site connection? +### Can I change the authentication type for a point-to-site connection? -Yes. In the portal, navigate to the **VPN gateway -> Point-to-site configuration** page. For **Authentication type**, select the authentication types that you want to use . Please note that after you make a change to an authentication type, current clients may not be able to connect until a new VPN client configuration profile has been generated, downloaded, and applied to each VPN client. +Yes. In the portal, navigate to the **VPN gateway -> Point-to-site configuration** page. For **Authentication type**, select the authentication types that you want to use. 
Note that after you make a change to an authentication type, current clients may not be able to connect until a new VPN client configuration profile has been generated, downloaded, and applied to each VPN client. ### Does Azure support IKEv2 VPN with Windows? -IKEv2 is supported on Windows 10 and Server 2016. However, in order to use IKEv2 in certain OS versions, you must install updates and set a registry key value locally. Note that OS versions prior to Windows 10 are not supported and can only use SSTP or **OpenVPN® Protocol**. +IKEv2 is supported on Windows 10 and Server 2016. However, in order to use IKEv2 in certain OS versions, you must install updates and set a registry key value locally. OS versions prior to Windows 10 aren't supported and can only use SSTP or **OpenVPN® Protocol**. -> NOTE: Windows OS builds newer than Windows 10 Version 1709 and Windows Server 2016 Version 1607 do not require these steps. +> [!NOTE] +> Windows OS builds newer than Windows 10 Version 1709 and Windows Server 2016 Version 1607 do not require these steps. To prepare Windows 10 or Server 2016 for IKEv2: @@ -100,7 +98,7 @@ The traffic selectors limit in Windows determines the maximum number of address ### What happens when I configure both SSTP and IKEv2 for P2S VPN connections? -When you configure both SSTP and IKEv2 in a mixed environment (consisting of Windows and Mac devices), the Windows VPN client will always try IKEv2 tunnel first, but will fall back to SSTP if the IKEv2 connection is not successful. MacOSX will only connect via IKEv2. +When you configure both SSTP and IKEv2 in a mixed environment (consisting of Windows and Mac devices), the Windows VPN client will always try IKEv2 tunnel first, but will fall back to SSTP if the IKEv2 connection isn't successful. MacOSX will only connect via IKEv2. ### Other than Windows and Mac, which other platforms does Azure support for P2S VPN? @@ -108,7 +106,7 @@ Azure supports Windows, Mac, and Linux for P2S VPN. ### I already have an Azure VPN Gateway deployed. Can I enable RADIUS and/or IKEv2 VPN on it? -Yes, if the gateway SKU that you are using supports RADIUS and/or IKEv2, you can enable these features on gateways that you've already deployed by using PowerShell or the Azure portal. Note that the Basic SKU does not support RADIUS or IKEv2. +Yes, if the gateway SKU that you're using supports RADIUS and/or IKEv2, you can enable these features on gateways that you've already deployed by using PowerShell or the Azure portal. The Basic SKU doesn't support RADIUS or IKEv2. ### How do I remove the configuration of a P2S connection? diff --git a/includes/vpn-gateway-faq-p2s-azurecert-include.md b/includes/vpn-gateway-faq-p2s-azurecert-include.md index 2e410896a679..cd8944931ccf 100644 --- a/includes/vpn-gateway-faq-p2s-azurecert-include.md +++ b/includes/vpn-gateway-faq-p2s-azurecert-include.md @@ -1,13 +1,9 @@ --- title: include file - description: include file - services: vpn-gateway author: cherylmc ms.service: vpn-gateway - ms.topic: include - ms.date: 05/28/2021 + ms.date: 05/25/2022 ms.author: cherylmc - ms.custom: include file --- [!INCLUDE [P2S FAQ All](vpn-gateway-faq-p2s-all-include.md)] @@ -15,7 +11,7 @@ Uncheck **"Verify the server's identity by validating the certificate"** or **add the server FQDN along with the certificate** when creating a profile manually. You can do this by running **rasphone** from a command prompt and picking the profile from the drop-down list. 
-Bypassing server identity validation is not recommended in general, but with Azure certificate authentication, the same certificate is being used for server validation in the VPN tunneling protocol (IKEv2/SSTP) and the EAP protocol. Since the server certificate and FQDN is already validated by the VPN tunneling protocol, it is redundant to validate the same again in EAP. +Bypassing server identity validation isn't recommended in general, but with Azure certificate authentication, the same certificate is being used for server validation in the VPN tunneling protocol (IKEv2/SSTP) and the EAP protocol. Since the server certificate and FQDN is already validated by the VPN tunneling protocol, it's redundant to validate the same again in EAP. ![point-to-site auth](./media/vpn-gateway-faq-p2s-all-include/servercert.png "Server Certificate") @@ -39,11 +35,11 @@ You can use your Enterprise PKI solution (your internal PKI), Azure PowerShell, * **MakeCert:** See the [MakeCert](../articles/vpn-gateway/vpn-gateway-certificates-point-to-site-makecert.md) article for steps. -* **OpenSSL:** +* **OpenSSL:** - * When exporting certificates, be sure to convert the root certificate to Base64. + * When exporting certificates, be sure to convert the root certificate to Base64. - * For the client certificate: + * For the client certificate: - * When creating the private key, specify the length as 4096. - * When creating the certificate, for the *-extensions* parameter, specify *usr_cert*. + * When creating the private key, specify the length as 4096. + * When creating the certificate, for the *-extensions* parameter, specify *usr_cert*. diff --git a/includes/vpn-gateway-faq-p2s-radius-include.md b/includes/vpn-gateway-faq-p2s-radius-include.md index 158f726ed311..629ba236ae7e 100644 --- a/includes/vpn-gateway-faq-p2s-radius-include.md +++ b/includes/vpn-gateway-faq-p2s-radius-include.md @@ -1,53 +1,52 @@ --- title: include file - description: include file - services: vpn-gateway author: cherylmc ms.service: vpn-gateway - ms.topic: include - ms.date: 05/28/2021 + ms.date: 05/24/2022 ms.author: cherylmc - ms.custom: include file --- [!INCLUDE [P2S FAQ All](vpn-gateway-faq-p2s-all-include.md)] ### Is RADIUS authentication supported on all Azure VPN Gateway SKUs? -RADIUS authentication is supported for VpnGw1, VpnGw2, and VpnGw3 SKUs. If you are using legacy SKUs, RADIUS authentication is supported on Standard and High Performance SKUs. It is not supported on the Basic Gateway SKU.  - +RADIUS authentication is supported for VpnGw1, VpnGw2, and VpnGw3 SKUs. + +For legacy SKUs, RADIUS authentication is supported on Standard and High Performance SKUs. It isn't supported on the Basic Gateway SKU. + ### Is RADIUS authentication supported for the classic deployment model? - -No. RADIUS authentication is not supported for the classic deployment model. + +No. RADIUS authentication isn't supported for the classic deployment model. ### What is the timeout period for RADIUS requests sent to the RADIUS server? -RADIUS requests are set to timeout after 30 seconds. User defined timeout values are not supported today. - + +RADIUS requests are set to timeout after 30 seconds. User defined timeout values aren't supported today. + ### Are 3rd-party RADIUS servers supported? Yes, 3rd-party RADIUS servers are supported. - + ### What are the connectivity requirements to ensure that the Azure gateway is able to reach an on-premises RADIUS server? 
-A VPN Site-to-Site connection to the on-premises site, with the proper routes configured, is required.   - +A site-to-site VPN connection to the on-premises site, with the proper routes configured, is required. + ### Can traffic to an on-premises RADIUS server (from the Azure VPN gateway) be routed over an ExpressRoute connection? -No. It can only be routed over a Site-to-Site connection. - +No. It can only be routed over a site-to-site connection. + ### Is there a change in the number of SSTP connections supported with RADIUS authentication? What is the maximum number of SSTP and IKEv2 connections supported? -There is no change in the maximum number of SSTP connections supported on a gateway with RADIUS authentication. It remains 128 for SSTP, but depends on the gateway SKU for IKEv2. For more information on the number of connections supported, see [Gateway SKUs](../articles/vpn-gateway/vpn-gateway-about-vpngateways.md#gwsku). - -### What is the difference between doing certificate authentication using a RADIUS server vs. using Azure native certificate authentication (by uploading a trusted certificate to Azure). +There is no change in the maximum number of SSTP connections supported on a gateway with RADIUS authentication. It remains 128 for SSTP, but depends on the gateway SKU for IKEv2. For more information on the number of connections supported, see [Gateway SKUs](../articles/vpn-gateway/vpn-gateway-about-vpngateways.md#gwsku). + +### What is the difference between doing certificate authentication using a RADIUS server vs. using Azure native certificate authentication (by uploading a trusted certificate to Azure)? In RADIUS certificate authentication, the authentication request is forwarded to a RADIUS server that handles the actual certificate validation. This option is useful if you want to integrate with a certificate authentication infrastructure that you already have through RADIUS. - + When using Azure for certificate authentication, the Azure VPN gateway performs the validation of the certificate. You need to upload your certificate public key to the gateway. You can also specify list of revoked certificates that shouldn’t be allowed to connect. ### Does RADIUS authentication work with both IKEv2, and SSTP VPN? -Yes, RADIUS authentication is supported for both IKEv2, and SSTP VPN.  +Yes, RADIUS authentication is supported for both IKEv2, and SSTP VPN. ### Does RADIUS authentication work with the OpenVPN client? -RADIUS authentication is supported for the OpenVPN protocol only through PowerShell. +RADIUS authentication is supported for the OpenVPN protocol only through PowerShell. \ No newline at end of file diff --git a/includes/vpn-gateway-faq-point-to-site-classic-include.md b/includes/vpn-gateway-faq-point-to-site-classic-include.md index d00005f61439..36bb6c036ba4 100644 --- a/includes/vpn-gateway-faq-point-to-site-classic-include.md +++ b/includes/vpn-gateway-faq-point-to-site-classic-include.md @@ -1,17 +1,13 @@ --- title: include file - description: include file - services: vpn-gateway author: cherylmc ms.service: vpn-gateway - ms.topic: include - ms.date: 05/28/2021 + ms.date: 05/25/2022 ms.author: cherylmc - ms.custom: include file --- This FAQ applies to P2S connections that use the classic deployment model. -### What client operating systems can I use with Point-to-Site? +### What client operating systems can I use with point-to-site? 
The following client operating systems are supported: @@ -22,40 +18,41 @@ The following client operating systems are supported: * Windows Server 2012 (64-bit only) * Windows Server 2012 R2 (64-bit only) * Windows 10 +* Windows 11 -### Can I use any software VPN client that supports SSTP for Point-to-Site? +### Can I use any software VPN client that supports SSTP for point-to-site? No. Support is limited only to the listed Windows operating system versions. -### How many VPN client endpoints can exist in my Point-to-Site configuration? +### How many VPN client endpoints can exist in my point-to-site configuration? The number of VPN client endpoints depends on your gateway sku and protocol. [!INCLUDE [Aggregated throughput by SKU](./vpn-gateway-table-gwtype-aggtput-include.md)] -### Can I use my own internal PKI root CA for Point-to-Site connectivity? +### Can I use my own internal PKI root CA for point-to-site connectivity? Yes. Previously, only self-signed root certificates could be used. You can still upload up to 20 root certificates. -### Can I traverse proxies and firewalls by using Point-to-Site? +### Can I traverse proxies and firewalls by using point-to-site? Yes. We use Secure Socket Tunneling Protocol (SSTP) to tunnel through firewalls. This tunnel appears as an HTTPS connection. -### If I restart a client computer configured for Point-to-Site, will the VPN automatically reconnect? +### If I restart a client computer configured for point-to-site, will the VPN automatically reconnect? By default, the client computer won't reestablish the VPN connection automatically. -### Does Point-to-Site support auto reconnect and DDNS on the VPN clients? +### Does point-to-site support auto reconnect and DDNS on the VPN clients? -No. Auto reconnect and DDNS are currently not supported in Point-to-Site VPNs. +No. Auto reconnect and DDNS are currently not supported in point-to-site VPNs. -### Can I have Site-to-Site and Point-to-Site configurations for the same virtual network? +### Can I have Site-to-Site and point-to-site configurations for the same virtual network? -Yes. Both solutions will work if you have a RouteBased VPN type for your gateway. For the classic deployment model, you need a dynamic gateway. We don't support Point-to-Site for static routing VPN gateways or gateways that use the **-VpnType PolicyBased** cmdlet. +Yes. Both solutions will work if you have a RouteBased VPN type for your gateway. For the classic deployment model, you need a dynamic gateway. We don't support point-to-site for static routing VPN gateways or gateways that use the **-VpnType PolicyBased** cmdlet. -### Can I configure a Point-to-Site client to connect to multiple virtual networks at the same time? +### Can I configure a point-to-site client to connect to multiple virtual networks at the same time? -Yes. However, the virtual networks can't have overlapping IP prefixes and the Point-to-Site address spaces must not overlap between the virtual networks. +Yes. However, the virtual networks can't have overlapping IP prefixes and the point-to-site address spaces must not overlap between the virtual networks. -### How much throughput can I expect through Site-to-Site or Point-to-Site connections? +### How much throughput can I expect through Site-to-Site or point-to-site connections? It's difficult to maintain the exact throughput of the VPN tunnels. IPsec and SSTP are crypto-heavy VPN protocols. Throughput is also limited by the latency and bandwidth between your premises and the internet. 
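Several of the preceding answers (VPN client endpoints, aggregate throughput) depend on the gateway SKU that's deployed. As a minimal sketch, assuming the Az PowerShell module and a Resource Manager (non-classic) gateway, and using placeholder resource names, you can confirm which SKU and generation your gateway runs before comparing it against the limits tables:

```powershell
# Minimal sketch (assumes the Az module is installed and you're signed in with Connect-AzAccount).
# "TestRG1" and "VNet1GW" are placeholder names, not values from this article.
$gw = Get-AzVirtualNetworkGateway -Name "VNet1GW" -ResourceGroupName "TestRG1"

# The SKU (for example, VpnGw1) and the generation determine the connection and throughput limits.
$gw.Sku.Name
$gw.VpnGatewayGeneration
```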
diff --git a/includes/vpn-gateway-faq-vnet-vnet-include.md b/includes/vpn-gateway-faq-vnet-vnet-include.md index 471ac0c9d9d9..aca50593481e 100644 --- a/includes/vpn-gateway-faq-vnet-vnet-include.md +++ b/includes/vpn-gateway-faq-vnet-vnet-include.md @@ -1,5 +1,4 @@ --- - description: include file author: cherylmc ms.service: vpn-gateway ms.topic: include @@ -79,7 +78,4 @@ No. You can't have overlapping IP address ranges. ### Can there be overlapping address spaces among connected virtual networks and on-premises local sites? -No. You can't have overlapping IP address ranges. - - - +No. You can't have overlapping IP address ranges. \ No newline at end of file diff --git a/includes/vpn-gateway-generate-export-certificates-include.md b/includes/vpn-gateway-generate-export-certificates-include.md index 1b10a96a4299..fc98d4545b43 100644 --- a/includes/vpn-gateway-generate-export-certificates-include.md +++ b/includes/vpn-gateway-generate-export-certificates-include.md @@ -1,13 +1,9 @@ --- - title: include file - description: include file - services: vpn-gateway + ms.topic: include author: cherylmc ms.service: vpn-gateway - ms.topic: include - ms.date: 05/26/2021 + ms.date: 05/23/2022 ms.author: cherylmc - ms.custom: include file # This include is used for both Virtual WAN and VPN Gateway articles. Any changes you make must apply address both services. --- @@ -16,7 +12,7 @@ Use the New-SelfSignedCertificate cmdlet to create a self-signed root certificate. For additional parameter information, see [New-SelfSignedCertificate](/powershell/module/pki/new-selfsignedcertificate). -1. From a computer running Windows 10 or Windows Server 2016, open a Windows PowerShell console with elevated privileges. These examples do not work in the Azure Cloud Shell "Try It". You must run these examples locally. +1. From a computer running Windows 10 or later, or Windows Server 2016, open a Windows PowerShell console with elevated privileges. These examples don't work in the Azure Cloud Shell "Try It". You must run these examples locally. 1. Use the following example to create the self-signed root certificate. The following example creates a self-signed root certificate named 'P2SRootCert' that is automatically installed in 'Certificates-Current User\Personal\Certificates'. You can view the certificate by opening *certmgr.msc*, or *Manage User Certificates*. Run the following example with any necessary modifications. @@ -32,7 +28,7 @@ Use the New-SelfSignedCertificate cmdlet to create a self-signed root certificat ## Generate a client certificate -Each client computer that connects to a VNet using Point-to-Site must have a client certificate installed. You generate a client certificate from the self-signed root certificate, and then export and install the client certificate. If the client certificate is not installed, authentication fails. +Each client computer that connects to a VNet using Point-to-Site must have a client certificate installed. You generate a client certificate from the self-signed root certificate, and then export and install the client certificate. If the client certificate isn't installed, authentication fails. The following steps walk you through generating a client certificate from a self-signed root certificate. You may generate multiple client certificates from the same root certificate. When you generate client certificates using the steps below, the client certificate is automatically installed on the computer that you used to generate the certificate. 
If you want to install a client certificate on another client computer, you can export the certificate. @@ -40,9 +36,9 @@ The examples use the New-SelfSignedCertificate cmdlet to generate a client certi ### Example 1 - PowerShell console session still open -Use this example if you have not closed your PowerShell console after creating the self-signed root certificate. This example continues from the previous section and uses the declared '$cert' variable. If you closed the PowerShell console after creating the self-signed root certificate, or are creating additional client certificates in a new PowerShell console session, use the steps in [Example 2](#ex2). +Use this example if you haven't closed your PowerShell console after creating the self-signed root certificate. This example continues from the previous section and uses the declared '$cert' variable. If you closed the PowerShell console after creating the self-signed root certificate, or are creating additional client certificates in a new PowerShell console session, use the steps in [Example 2](#ex2). -Modify and run the example to generate a client certificate. If you run the following example without modifying it, the result is a client certificate named 'P2SChildCert'. If you want to name the child certificate something else, modify the CN value. Do not change the TextExtension when running this example. The client certificate that you generate is automatically installed in 'Certificates - Current User\Personal\Certificates' on your computer. +Modify and run the example to generate a client certificate. If you run the following example without modifying it, the result is a client certificate named 'P2SChildCert'. If you want to name the child certificate something else, modify the CN value. Don't change the TextExtension when running this example. The client certificate that you generate is automatically installed in 'Certificates - Current User\Personal\Certificates' on your computer. ```powershell New-SelfSignedCertificate -Type Custom -DnsName P2SChildCert -KeySpec Signature ` @@ -54,7 +50,7 @@ New-SelfSignedCertificate -Type Custom -DnsName P2SChildCert -KeySpec Signature ### Example 2 - New PowerShell console session -If you are creating additional client certificates, or are not using the same PowerShell session that you used to create your self-signed root certificate, use the following steps: +If you're creating additional client certificates, or aren't using the same PowerShell session that you used to create your self-signed root certificate, use the following steps: 1. Identify the self-signed root certificate that is installed on the computer. This cmdlet returns a list of certificates that are installed on your computer. @@ -83,7 +79,7 @@ If you are creating additional client certificates, or are not using the same Po $cert = Get-ChildItem -Path "Cert:\CurrentUser\My\7181AA8C1B4D34EEDB2F3D3BEC5839F3FE52D655" ``` -1. Modify and run the example to generate a client certificate. If you run the following example without modifying it, the result is a client certificate named 'P2SChildCert'. If you want to name the child certificate something else, modify the CN value. Do not change the TextExtension when running this example. The client certificate that you generate is automatically installed in 'Certificates - Current User\Personal\Certificates' on your computer. +1. Modify and run the example to generate a client certificate. 
If you run the following example without modifying it, the result is a client certificate named 'P2SChildCert'. If you want to name the child certificate something else, modify the CN value. Don't change the TextExtension when running this example. The client certificate that you generate is automatically installed in 'Certificates - Current User\Personal\Certificates' on your computer. ```powershell New-SelfSignedCertificate -Type Custom -DnsName P2SChildCert -KeySpec Signature ` diff --git a/includes/vpn-gateway-p2s-clientcert-include.md b/includes/vpn-gateway-p2s-clientcert-include.md index 100f61f411d9..be3fe7dfed16 100644 --- a/includes/vpn-gateway-p2s-clientcert-include.md +++ b/includes/vpn-gateway-p2s-clientcert-include.md @@ -1,11 +1,8 @@ --- - title: include file - description: include file - services: vpn-gateway + ms.topic: include author: cherylmc ms.service: vpn-gateway - ms.topic: include - ms.date: 10/28/2020 + ms.date: 05/23/2022 ms.author: cherylmc --- @@ -27,8 +24,8 @@ You can generate client certificates by using the following methods: The steps in these articles generate a compatible client certificate, which you can then export and distribute. - * [Windows 10 PowerShell instructions](../articles/vpn-gateway/vpn-gateway-certificates-point-to-site.md#clientcert): These instructions require Windows 10 and PowerShell to generate certificates. The generated certificates can be installed on any supported P2S client. + * [Windows 10 or later PowerShell instructions](../articles/vpn-gateway/vpn-gateway-certificates-point-to-site.md#clientcert): These instructions require Windows 10 or later, and PowerShell to generate certificates. The generated certificates can be installed on any supported P2S client. - * [MakeCert instructions](../articles/vpn-gateway/vpn-gateway-certificates-point-to-site-makecert.md): Use MakeCert if you don't have access to a Windows 10 computer for generating certificates. Although MakeCert is deprecated, you can still use it to generate certificates. You can install the generated certificates on any supported P2S client. + * [MakeCert instructions](../articles/vpn-gateway/vpn-gateway-certificates-point-to-site-makecert.md): Use MakeCert if you don't have access to a Windows 10 or later computer for generating certificates. Although MakeCert is deprecated, you can still use it to generate certificates. You can install the generated certificates on any supported P2S client. * [Linux instructions](../articles/vpn-gateway/vpn-gateway-certificates-point-to-site-linux.md). \ No newline at end of file diff --git a/includes/vpn-gateway-p2s-rootcert-include.md b/includes/vpn-gateway-p2s-rootcert-include.md index 3cc2ce293ca1..97234d9706fd 100644 --- a/includes/vpn-gateway-p2s-rootcert-include.md +++ b/includes/vpn-gateway-p2s-rootcert-include.md @@ -1,19 +1,15 @@ --- - title: include file - description: include file - services: vpn-gateway + ms.topic: include author: cherylmc ms.service: vpn-gateway - ms.topic: include - ms.date: 10/28/2020 + ms.date: 05/23/2022 ms.author: cherylmc - ms.custom: include file --- Obtain the .cer file for the root certificate. You can use either a root certificate that was generated with an enterprise solution (recommended), or generate a self-signed certificate. After you create the root certificate, export the public certificate data (not the private key) as a Base64 encoded X.509 .cer file. You upload this file later to Azure. 
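A minimal sketch of that export step, assuming a self-signed root certificate with the subject name 'P2SRootCert' already exists in the current user store and that the output folder exists (both names and paths are placeholders, not values from the original include):

```powershell
# Minimal sketch: export the public certificate data (no private key) of an existing
# root certificate and re-encode it as Base64 for upload to Azure.
# 'P2SRootCert' and the file paths are placeholders.
$cert = Get-ChildItem -Path "Cert:\CurrentUser\My" |
    Where-Object { $_.Subject -eq "CN=P2SRootCert" } |
    Select-Object -First 1

# Export the DER-encoded public certificate.
Export-Certificate -Cert $cert -FilePath "C:\temp\P2SRootCert.cer" | Out-Null

# Re-encode the exported file as Base64 (the format you upload to Azure).
certutil -encode "C:\temp\P2SRootCert.cer" "C:\temp\P2SRootCertBase64.cer"
```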
* **Enterprise certificate:** If you're using an enterprise solution, you can use your existing certificate chain. Acquire the .cer file for the root certificate that you want to use. * **Self-signed root certificate:** If you aren't using an enterprise certificate solution, create a self-signed root certificate. Otherwise, the certificates you create won't be compatible with your P2S connections and clients will receive a connection error when they try to connect. You can use Azure PowerShell, MakeCert, or OpenSSL. The steps in the following articles describe how to generate a compatible self-signed root certificate: - * [Windows 10 PowerShell instructions](../articles/vpn-gateway/vpn-gateway-certificates-point-to-site.md): These instructions require Windows 10 and PowerShell to generate certificates. Client certificates that are generated from the root certificate can be installed on any supported P2S client. - * [MakeCert instructions](../articles/vpn-gateway/vpn-gateway-certificates-point-to-site-makecert.md): Use MakeCert if you don't have access to a Windows 10 computer to use to generate certificates. Although MakeCert is deprecated, you can still use it to generate certificates. Client certificates that you generate from the root certificate can be installed on any supported P2S client. + * [Windows 10 or later PowerShell instructions](../articles/vpn-gateway/vpn-gateway-certificates-point-to-site.md): These instructions require Windows 10 or later and PowerShell to generate certificates. Client certificates that are generated from the root certificate can be installed on any supported P2S client. + * [MakeCert instructions](../articles/vpn-gateway/vpn-gateway-certificates-point-to-site-makecert.md): Use MakeCert if you don't have access to a Windows 10 or later computer to use to generate certificates. Although MakeCert is deprecated, you can still use it to generate certificates. Client certificates that you generate from the root certificate can be installed on any supported P2S client. * [Linux instructions](../articles/vpn-gateway/vpn-gateway-certificates-point-to-site-linux.md). \ No newline at end of file diff --git a/includes/vpn-gateway-table-gwtype-aggtput-include.md b/includes/vpn-gateway-table-gwtype-aggtput-include.md index c5f9f5f69d7d..bbf42ba53491 100644 --- a/includes/vpn-gateway-table-gwtype-aggtput-include.md +++ b/includes/vpn-gateway-table-gwtype-aggtput-include.md @@ -38,10 +38,12 @@ * IPsec limits the throughput of a single VPN tunnel (both S2S and P2S connections) to 1.25Gbps. **If you have a lot of P2S connections, it can negatively impact your S2S connections.** The Aggregate Throughput Benchmarks were tested by maximizing a combination of S2S and P2S connections. A single P2S or S2S connection can have a much lower throughput than the 1.25Gbps limit. * Note that all benchmarks aren't guaranteed due to Internet traffic conditions and your application behaviors -To help our customers understand the relative performance of SKUs using different algorithms, we used publicly available iPerf and CTSTraffic tools to measure performances for site-to-site connections. The table below lists the results of performance tests for Generation 1, VpnGw SKUs. As you can see, the best performance is obtained when we used GCMAES256 algorithm for both IPsec Encryption and Integrity. We got average performance when using AES256 for IPsec Encryption and SHA256 for Integrity. When we used DES3 for IPsec Encryption and SHA256 for Integrity we got lowest performance. 
+To help our customers understand the relative performance of SKUs using different algorithms, we used publicly available iPerf and CTSTraffic tools to measure performance for site-to-site connections. The table below lists the results of performance tests for VpnGw SKUs. As you can see, the best performance is obtained when we used the GCMAES256 algorithm for both IPsec Encryption and Integrity. We got average performance when using AES256 for IPsec Encryption and SHA256 for Integrity. When we used DES3 for IPsec Encryption and SHA256 for Integrity, we got the lowest performance. A VPN tunnel connects to a VPN gateway instance. Each instance throughput is mentioned in the above throughput table and is available aggregated across all tunnels connecting to that instance. +The table below shows the observed bandwidth and packets per second throughput per tunnel for the different gateway SKUs. All testing was performed between gateways (endpoints) within Azure across different regions with 100 connections and under standard load conditions. +
|**Generation**|**SKU** | **Algorithms
used** | **Throughput
observed per tunnel** | **Packets per second per tunnel
observed** |
|--- |--- | --- | --- | --- |
|**Generation1**|**VpnGw1**| GCMAES256
AES256 & SHA256
DES3 & SHA256| 650 Mbps
500 Mbps
130 Mbps | 62,000
47,000
12,000|
@@ -58,4 +60,4 @@ A VPN tunnel connects to a VPN gateway instance. Each instance throughput is men
|**Generation2**|**VpnGw2AZ**| GCMAES256
AES256 & SHA256
DES3 & SHA256| 1.25 Gbps
550 Mbps
130 Mbps | 120,000
52,000
12,000|
|**Generation2**|**VpnGw3AZ**| GCMAES256
AES256 & SHA256
DES3 & SHA256| 1.5 Gbps
700 Mbps
140 Mbps | 140,000
66,000
13,000|
|**Generation2**|**VpnGw4AZ**| GCMAES256
AES256 & SHA256
DES3 & SHA256| 2.3 Gbps
700 Mbps
140 Mbps | 220,000
66,000
13,000|
-|**Generation2**|**VpnGw5AZ**| GCMAES256
AES256 & SHA256
DES3 & SHA256| 2.3 Gbps
700 Mbps
140 Mbps | 220,000
66,000
13,000| \ No newline at end of file
+|**Generation2**|**VpnGw5AZ**| GCMAES256
AES256 & SHA256
DES3 & SHA256| 2.3 Gbps
700 Mbps
140 Mbps | 220,000
66,000
13,000|
diff --git a/includes/vpn-gateway-tls-change.md b/includes/vpn-gateway-tls-change.md index fa357ba72f71..d0bb56550e2c 100644 --- a/includes/vpn-gateway-tls-change.md +++ b/includes/vpn-gateway-tls-change.md @@ -1,12 +1,8 @@ --- - title: include file - description: include file - services: vpn-gateway + ms.topic: include author: cherylmc ms.service: vpn-gateway - ms.topic: include - ms.date: 06/05/2018 + ms.date: 05/23/2022 ms.author: cherylmc - ms.custom: include file --- -Starting July 1, 2018, support is being removed for TLS 1.0 and 1.1 from Azure VPN Gateway. VPN Gateway will support only TLS 1.2. Only point-to-site connections are impacted; site-to-site connections will not be affected. If you’re using TLS for point-to-site VPNs on Windows 10 clients, you don’t need to take any action. If you are using TLS for point-to-site connections on Windows 7 and Windows 8 clients, see the [VPN Gateway FAQ](../articles/vpn-gateway/vpn-gateway-vpn-faq.md#P2S) for update instructions. \ No newline at end of file +Starting July 1, 2018, support is being removed for TLS 1.0 and 1.1 from Azure VPN Gateway. VPN Gateway will support only TLS 1.2. Only point-to-site connections are impacted; site-to-site connections won't be affected. If you’re using TLS for point-to-site VPNs on Windows 10 or later clients, you don’t need to take any action. If you're using TLS for point-to-site connections on Windows 7 and Windows 8 clients, see the [VPN Gateway FAQ](../articles/vpn-gateway/vpn-gateway-vpn-faq.md#P2S) for update instructions. \ No newline at end of file diff --git a/includes/vpn-gateway-tls-include.md b/includes/vpn-gateway-tls-include.md index f63087515e32..5f1a4a6bd1d2 100644 --- a/includes/vpn-gateway-tls-include.md +++ b/includes/vpn-gateway-tls-include.md @@ -1,13 +1,10 @@ --- title: include file - description: include file - services: vpn-gateway author: cherylmc ms.service: vpn-gateway ms.topic: include - ms.date: 07/27/2018 + ms.date: 05/23/2022 ms.author: cherylmc - ms.custom: include file --- diff --git a/includes/vpn-gateway-virtual-wan-vpn-profile-intune.md b/includes/vpn-gateway-virtual-wan-vpn-profile-intune.md index d14b7cf66c9a..de680536c2f5 100644 --- a/includes/vpn-gateway-virtual-wan-vpn-profile-intune.md +++ b/includes/vpn-gateway-virtual-wan-vpn-profile-intune.md @@ -1,18 +1,14 @@ --- - title: include file - description: include file - services: vpn-gateway + ms.topic: include author: cherylmc ms.service: vpn-gateway - ms.topic: include - ms.date: 04/26/2021 + ms.date: 05/23/2022 ms.author: cherylmc - ms.custom: include file # This include is used for both Virtual WAN and VPN Gateway articles. Any changes you make must apply address both services. --- -You can deploy profiles for Azure VPN clients (Windows 10) by using Microsoft Intune. This article helps you create an Intune profile using custom settings. +You can deploy profiles for Azure VPN clients (Windows 10 or later) by using Microsoft Intune. This article helps you create an Intune profile using custom settings. > [!NOTE] >* This article applies to deploying profiles that use Azure Active Directory for authentication only. @@ -21,7 +17,7 @@ You can deploy profiles for Azure VPN clients (Windows 10) by using Microsoft In ## Prerequisites * Devices are already enrolled with Intune MDM. -* The Azure VPN Client for Windows 10 is already deployed on the client machine. +* The Azure VPN Client for Windows 10 or later is already deployed on the client machine. * Only Windows version 19H2 or higher is supported. 

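A rough, hypothetical way to spot-check these prerequisites on a client machine with PowerShell (not part of the original include; the Store package name 'Microsoft.AzureVpn' is an assumption):

```powershell
# Hypothetical prerequisite check, not from the original article.
# Windows 10 19H2 corresponds to OS build 18363.
$build = [System.Environment]::OSVersion.Version.Build
if ($build -lt 18363) {
    Write-Warning "Build $build is older than 19H2 (18363); this client isn't supported."
}

# Assumes the Azure VPN Client was installed from the Microsoft Store (AppX package).
# Returns nothing if the package isn't present.
Get-AppxPackage -Name "Microsoft.AzureVpn"
```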
## Modify XML diff --git a/includes/vpn-gateway-vwan-always-on-device.md b/includes/vpn-gateway-vwan-always-on-device.md index 0eab77642e4f..060b7f531267 100644 --- a/includes/vpn-gateway-vwan-always-on-device.md +++ b/includes/vpn-gateway-vwan-always-on-device.md @@ -1,13 +1,9 @@ --- - title: include file - description: include file - services: vpn-gateway + ms.topic: include author: cherylmc ms.service: vpn-gateway - ms.topic: include ms.date: 05/26/2021 ms.author: cherylmc - ms.custom: include file # this file is used for both virtual wan and vpn gateway. When modifying, make sure that your changes work for both environments. --- @@ -18,12 +14,12 @@ The following requirements must be met in order to successfully establish a devi * The tunnel is only configurable for the Windows built-in VPN solution and is established using IKEv2 with computer certificate authentication. * Only one device tunnel can be configured per device. -1. Install client certificates on the Windows 10 client using the [point-to-site VPN client](../articles/vpn-gateway/point-to-site-how-to-vpn-client-install-azure-cert.md) article. The certificate needs to be in the Local Machine store. +1. Install client certificates on the Windows 10 or later client using the [point-to-site VPN client](../articles/vpn-gateway/point-to-site-how-to-vpn-client-install-azure-cert.md) article. The certificate needs to be in the Local Machine store. 1. Create a VPN Profile and configure device tunnel in the context of the LOCAL SYSTEM account using [these instructions](/windows-server/remote/remote-access/vpn/vpn-device-tunnel-config#vpn-device-tunnel-configuration). ### Configuration example for device tunnel -After you have configured the virtual network gateway and installed the client certificate in the Local Machine store on the Windows 10 client, use the following examples to configure a client device tunnel: +After you have configured the virtual network gateway and installed the client certificate in the Local Machine store on the Windows 10 or later client, use the following examples to configure a client device tunnel: 1. Copy the following text and save it as ***devicecert.ps1***. diff --git a/includes/vpn-gateway-vwan-always-on-intro.md b/includes/vpn-gateway-vwan-always-on-intro.md index 89614ade57fd..47234e97680c 100644 --- a/includes/vpn-gateway-vwan-always-on-intro.md +++ b/includes/vpn-gateway-vwan-always-on-intro.md @@ -1,20 +1,17 @@ --- title: include file - description: include file - services: vpn-gateway author: cherylmc ms.service: vpn-gateway ms.topic: include - ms.date: 05/26/2021 + ms.date: 05/23/2022 ms.author: cherylmc - ms.custom: include file # this file is used for both virtual wan and vpn gateway. When modifying, make sure that your changes work for both environments. --- -A new feature of the Windows 10 VPN client, Always On, is the ability to maintain a VPN connection. With Always On, the active VPN profile can connect automatically and remain connected based on triggers, such as user sign-in, network state change, or device screen active. +A new feature of the Windows 10 or later VPN client, Always On, is the ability to maintain a VPN connection. With Always On, the active VPN profile can connect automatically and remain connected based on triggers, such as user sign-in, network state change, or device screen active. -You can use gateways with Windows 10 Always On to establish persistent user tunnels and device tunnels to Azure. 
+You can use gateways with Always On to establish persistent user tunnels and device tunnels to Azure. Always On VPN connections include either of two types of tunnels: diff --git a/includes/vpn-gateway-vwan-always-on-user.md b/includes/vpn-gateway-vwan-always-on-user.md index 9d418f3a571c..3fd3d5a9455a 100644 --- a/includes/vpn-gateway-vwan-always-on-user.md +++ b/includes/vpn-gateway-vwan-always-on-user.md @@ -1,25 +1,22 @@ --- - title: include file - description: include file - services: vpn-gateway + ms.topic: include author: cherylmc ms.service: vpn-gateway - ms.topic: include - ms.date: 05/26/2021 + ms.date: 05/23/2022 ms.author: cherylmc - ms.custom: include file + # this file is used for both virtual wan and vpn gateway. When modifying, make sure that your changes work for both environments. --- -1. Install client certificates on the Windows 10 client, as shown in this [point-to-site VPN client](../articles/vpn-gateway/point-to-site-how-to-vpn-client-install-azure-cert.md) article. The certificate must be in the current user store. +1. Install client certificates on the Windows 10 or later client, as shown in this [point-to-site VPN client](../articles/vpn-gateway/point-to-site-how-to-vpn-client-install-azure-cert.md) article. The certificate must be in the current user store. -1. Configure the Always On VPN client through PowerShell, Configuration Manager, or Intune by following the instructions in [Configure Windows 10 client Always On VPN connections](/windows-server/remote/remote-access/vpn/always-on-vpn/deploy/vpn-deploy-client-vpn-connections). +1. Configure the Always On VPN client through PowerShell, Configuration Manager, or Intune by following the instructions in [Configure Windows 10 or later client Always On VPN connections](/windows-server/remote/remote-access/vpn/always-on-vpn/deploy/vpn-deploy-client-vpn-connections). ### Example configuration for the user tunnel -After you've configured the virtual network gateway and installed the client certificate in the local machine store on the Windows 10 client, configure a client device tunnel by using the following examples: +After you've configured the virtual network gateway and installed the client certificate in the current user store on the Windows 10 or later client, configure a client user tunnel by using the following examples: 1. Copy the following text, and save it as *usercert.ps1*: